Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/dispatcher/dsmethod.c | 12
-rw-r--r--  drivers/acpi/dispatcher/dsopcode.c | 3
-rw-r--r--  drivers/acpi/dispatcher/dsutils.c | 7
-rw-r--r--  drivers/acpi/dispatcher/dswstate.c | 9
-rw-r--r--  drivers/acpi/ec.c | 39
-rw-r--r--  drivers/acpi/events/evgpe.c | 5
-rw-r--r--  drivers/acpi/events/evgpeblk.c | 3
-rw-r--r--  drivers/acpi/events/evmisc.c | 20
-rw-r--r--  drivers/acpi/events/evregion.c | 15
-rw-r--r--  drivers/acpi/events/evrgnini.c | 3
-rw-r--r--  drivers/acpi/events/evxface.c | 7
-rw-r--r--  drivers/acpi/events/evxfevnt.c | 2
-rw-r--r--  drivers/acpi/executer/exconvrt.c | 5
-rw-r--r--  drivers/acpi/executer/excreate.c | 6
-rw-r--r--  drivers/acpi/executer/exdump.c | 17
-rw-r--r--  drivers/acpi/executer/exmutex.c | 37
-rw-r--r--  drivers/acpi/executer/exnames.c | 3
-rw-r--r--  drivers/acpi/executer/exprep.c | 2
-rw-r--r--  drivers/acpi/executer/exresop.c | 3
-rw-r--r--  drivers/acpi/executer/exsystem.c | 30
-rw-r--r--  drivers/acpi/executer/exutils.c | 104
-rw-r--r--  drivers/acpi/hardware/hwsleep.c | 1
-rw-r--r--  drivers/acpi/namespace/nseval.c | 13
-rw-r--r--  drivers/acpi/namespace/nsinit.c | 7
-rw-r--r--  drivers/acpi/namespace/nswalk.c | 6
-rw-r--r--  drivers/acpi/namespace/nsxfeval.c | 17
-rw-r--r--  drivers/acpi/osl.c | 45
-rw-r--r--  drivers/acpi/parser/psopcode.c | 618
-rw-r--r--  drivers/acpi/resources/rscalc.c | 3
-rw-r--r--  drivers/acpi/resources/rscreate.c | 13
-rw-r--r--  drivers/acpi/resources/rsdump.c | 8
-rw-r--r--  drivers/acpi/resources/rsinfo.c | 2
-rw-r--r--  drivers/acpi/resources/rslist.c | 7
-rw-r--r--  drivers/acpi/resources/rsmisc.c | 4
-rw-r--r--  drivers/acpi/resources/rsutils.c | 6
-rw-r--r--  drivers/acpi/resources/rsxface.c | 3
-rw-r--r--  drivers/acpi/sleep/main.c | 68
-rw-r--r--  drivers/acpi/sleep/proc.c | 13
-rw-r--r--  drivers/acpi/tables/tbfadt.c | 6
-rw-r--r--  drivers/acpi/tables/tbxface.c | 16
-rw-r--r--  drivers/acpi/thermal.c | 104
-rw-r--r--  drivers/acpi/utilities/utalloc.c | 1
-rw-r--r--  drivers/acpi/utilities/utcache.c | 3
-rw-r--r--  drivers/acpi/utilities/utcopy.c | 4
-rw-r--r--  drivers/acpi/utilities/utdebug.c | 4
-rw-r--r--  drivers/acpi/utilities/utdelete.c | 1
-rw-r--r--  drivers/acpi/utilities/utglobal.c | 6
-rw-r--r--  drivers/acpi/utilities/utmisc.c | 6
-rw-r--r--  drivers/acpi/utilities/utmutex.c | 8
-rw-r--r--  drivers/acpi/utilities/utresrc.c | 1
-rw-r--r--  drivers/acpi/utilities/utxface.c | 2
-rw-r--r--  drivers/ata/Kconfig | 3
-rw-r--r--  drivers/ata/libata-acpi.c | 3
-rw-r--r--  drivers/ata/libata-core.c | 10
-rw-r--r--  drivers/ata/pata_pcmcia.c | 1
-rw-r--r--  drivers/ata/pata_qdi.c | 2
-rw-r--r--  drivers/ata/pata_scc.c | 4
-rw-r--r--  drivers/ata/sata_nv.c | 92
-rw-r--r--  drivers/ata/sata_promise.c | 24
-rw-r--r--  drivers/ata/sata_via.c | 8
-rw-r--r--  drivers/auxdisplay/Kconfig | 1
-rw-r--r--  drivers/base/devres.c | 32
-rw-r--r--  drivers/base/platform.c | 2
-rw-r--r--  drivers/base/topology.c | 3
-rw-r--r--  drivers/block/Kconfig | 4
-rw-r--r--  drivers/block/loop.c | 6
-rw-r--r--  drivers/block/nbd.c | 15
-rw-r--r--  drivers/block/rd.c | 2
-rw-r--r--  drivers/char/Kconfig | 14
-rw-r--r--  drivers/char/drm/drm_dma.c | 2
-rw-r--r--  drivers/char/drm/drm_vm.c | 2
-rw-r--r--  drivers/char/drm/r300_reg.h | 2
-rw-r--r--  drivers/char/genrtc.c | 4
-rw-r--r--  drivers/char/hw_random/Kconfig | 14
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c | 156
-rw-r--r--  drivers/char/ipmi/Kconfig | 2
-rw-r--r--  drivers/char/mmtimer.c | 4
-rw-r--r--  drivers/char/pcmcia/Kconfig | 1
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 46
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 7
-rw-r--r--  drivers/char/tpm/Kconfig | 3
-rw-r--r--  drivers/char/tty_io.c | 22
-rw-r--r--  drivers/cpufreq/cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 2
-rw-r--r--  drivers/crypto/Kconfig | 24
-rw-r--r--  drivers/dma/Kconfig | 1
-rw-r--r--  drivers/edac/Kconfig | 1
-rw-r--r--  drivers/firewire/Kconfig | 61
-rw-r--r--  drivers/firewire/Makefile | 10
-rw-r--r--  drivers/firewire/fw-card.c | 560
-rw-r--r--  drivers/firewire/fw-cdev.c | 961
-rw-r--r--  drivers/firewire/fw-device.c | 813
-rw-r--r--  drivers/firewire/fw-device.h | 146
-rw-r--r--  drivers/firewire/fw-iso.c | 163
-rw-r--r--  drivers/firewire/fw-ohci.c | 1943
-rw-r--r--  drivers/firewire/fw-ohci.h | 153
-rw-r--r--  drivers/firewire/fw-sbp2.c | 1147
-rw-r--r--  drivers/firewire/fw-topology.c | 537
-rw-r--r--  drivers/firewire/fw-topology.h | 92
-rw-r--r--  drivers/firewire/fw-transaction.c | 910
-rw-r--r--  drivers/firewire/fw-transaction.h | 458
-rw-r--r--  drivers/hwmon/Kconfig | 1
-rw-r--r--  drivers/hwmon/ams/ams-input.c | 2
-rw-r--r--  drivers/hwmon/applesmc.c | 2
-rw-r--r--  drivers/hwmon/coretemp.c | 2
-rw-r--r--  drivers/hwmon/hdaps.c | 2
-rw-r--r--  drivers/i2c/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-at91.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 2
-rw-r--r--  drivers/i2c/chips/tps65010.c | 2
-rw-r--r--  drivers/ide/Kconfig | 15
-rw-r--r--  drivers/ide/Makefile | 2
-rw-r--r--  drivers/ide/arm/bast-ide.c | 2
-rw-r--r--  drivers/ide/arm/icside.c | 9
-rw-r--r--  drivers/ide/arm/ide_arm.c | 2
-rw-r--r--  drivers/ide/arm/rapide.c | 2
-rw-r--r--  drivers/ide/cris/ide-cris.c | 4
-rw-r--r--  drivers/ide/h8300/ide-h8300.c | 2
-rw-r--r--  drivers/ide/ide-cd.c | 20
-rw-r--r--  drivers/ide/ide-disk.c | 101
-rw-r--r--  drivers/ide/ide-dma.c | 94
-rw-r--r--  drivers/ide/ide-floppy.c | 30
-rw-r--r--  drivers/ide/ide-generic.c | 2
-rw-r--r--  drivers/ide/ide-io.c | 17
-rw-r--r--  drivers/ide/ide-iops.c | 55
-rw-r--r--  drivers/ide/ide-lib.c | 132
-rw-r--r--  drivers/ide/ide-pnp.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 3
-rw-r--r--  drivers/ide/ide-proc.c | 344
-rw-r--r--  drivers/ide/ide-tape.c | 49
-rw-r--r--  drivers/ide/ide.c | 470
-rw-r--r--  drivers/ide/legacy/ali14xx.c | 3
-rw-r--r--  drivers/ide/legacy/buddha.c | 2
-rw-r--r--  drivers/ide/legacy/dtc2278.c | 3
-rw-r--r--  drivers/ide/legacy/falconide.c | 2
-rw-r--r--  drivers/ide/legacy/gayle.c | 2
-rw-r--r--  drivers/ide/legacy/ht6560b.c | 3
-rw-r--r--  drivers/ide/legacy/ide-cs.c | 2
-rw-r--r--  drivers/ide/legacy/macide.c | 6
-rw-r--r--  drivers/ide/legacy/q40ide.c | 2
-rw-r--r--  drivers/ide/legacy/qd65xx.c | 7
-rw-r--r--  drivers/ide/legacy/umc8672.c | 3
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c | 3
-rw-r--r--  drivers/ide/mips/swarm.c | 3
-rw-r--r--  drivers/ide/pci/aec62xx.c | 62
-rw-r--r--  drivers/ide/pci/alim15x3.c | 97
-rw-r--r--  drivers/ide/pci/amd74xx.c | 6
-rw-r--r--  drivers/ide/pci/atiixp.c | 40
-rw-r--r--  drivers/ide/pci/cmd64x.c | 90
-rw-r--r--  drivers/ide/pci/cs5520.c | 20
-rw-r--r--  drivers/ide/pci/cs5535.c | 33
-rw-r--r--  drivers/ide/pci/delkin_cb.c | 2
-rw-r--r--  drivers/ide/pci/hpt34x.c | 27
-rw-r--r--  drivers/ide/pci/hpt366.c | 85
-rw-r--r--  drivers/ide/pci/it8213.c | 39
-rw-r--r--  drivers/ide/pci/it821x.c | 20
-rw-r--r--  drivers/ide/pci/jmicron.c | 40
-rw-r--r--  drivers/ide/pci/pdc202xx_new.c | 40
-rw-r--r--  drivers/ide/pci/pdc202xx_old.c | 54
-rw-r--r--  drivers/ide/pci/piix.c | 163
-rw-r--r--  drivers/ide/pci/scc_pata.c | 21
-rw-r--r--  drivers/ide/pci/serverworks.c | 31
-rw-r--r--  drivers/ide/pci/sgiioc4.c | 2
-rw-r--r--  drivers/ide/pci/siimage.c | 47
-rw-r--r--  drivers/ide/pci/sis5513.c | 44
-rw-r--r--  drivers/ide/pci/slc90e66.c | 24
-rw-r--r--  drivers/ide/pci/tc86c001.c | 20
-rw-r--r--  drivers/ide/pci/triflex.c | 15
-rw-r--r--  drivers/ide/ppc/pmac.c | 2
-rw-r--r--  drivers/ide/setup-pci.c | 27
-rw-r--r--  drivers/ieee1394/Kconfig | 3
-rw-r--r--  drivers/ieee1394/nodemgr.c | 2
-rw-r--r--  drivers/infiniband/Kconfig | 8
-rw-r--r--  drivers/infiniband/Makefile | 1
-rw-r--r--  drivers/infiniband/core/Makefile | 4
-rw-r--r--  drivers/infiniband/core/device.c | 2
-rw-r--r--  drivers/infiniband/core/umem.c (renamed from drivers/infiniband/core/uverbs_mem.c) | 153
-rw-r--r--  drivers/infiniband/core/uverbs.h | 6
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 60
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 11
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c | 42
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 28
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h | 3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c | 69
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mr.c | 38
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/Kconfig | 9
-rw-r--r--  drivers/infiniband/hw/mlx4/Makefile | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c | 100
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 525
-rw-r--r--  drivers/infiniband/hw/mlx4/doorbell.c | 216
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 339
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 651
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 285
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 184
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 1294
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 334
-rw-r--r--  drivers/infiniband/hw/mlx4/user.h | 92
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 38
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 1
-rw-r--r--  drivers/input/Kconfig | 1
-rw-r--r--  drivers/isdn/Kconfig | 1
-rw-r--r--  drivers/isdn/capi/Kconfig | 2
-rw-r--r--  drivers/isdn/hardware/eicon/divasync.h | 2
-rw-r--r--  drivers/isdn/hisax/hfc_usb.c | 4
-rw-r--r--  drivers/kvm/Kconfig | 1
-rw-r--r--  drivers/kvm/kvm_main.c | 3
-rw-r--r--  drivers/leds/Kconfig | 1
-rw-r--r--  drivers/leds/leds-h1940.c | 2
-rw-r--r--  drivers/macintosh/Kconfig | 2
-rw-r--r--  drivers/macintosh/mediabay.c | 2
-rw-r--r--  drivers/mca/mca-bus.c | 28
-rw-r--r--  drivers/mca/mca-driver.c | 13
-rw-r--r--  drivers/md/Kconfig | 9
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/dm-bio-list.h | 26
-rw-r--r--  drivers/md/dm-crypt.c | 91
-rw-r--r--  drivers/md/dm-delay.c | 383
-rw-r--r--  drivers/md/dm-exception-store.c | 54
-rw-r--r--  drivers/md/dm-hw-handler.h | 1
-rw-r--r--  drivers/md/dm-io.c | 232
-rw-r--r--  drivers/md/dm-io.h | 83
-rw-r--r--  drivers/md/dm-log.c | 77
-rw-r--r--  drivers/md/dm-mpath.c | 3
-rw-r--r--  drivers/md/dm-raid1.c | 187
-rw-r--r--  drivers/md/dm-table.c | 10
-rw-r--r--  drivers/md/dm.c | 1
-rw-r--r--  drivers/md/kcopyd.c | 28
-rw-r--r--  drivers/md/md.c | 160
-rw-r--r--  drivers/md/raid1.c | 33
-rw-r--r--  drivers/md/raid5.c | 4
-rw-r--r--  drivers/media/Kconfig | 1
-rw-r--r--  drivers/media/dvb/dvb-usb/dvb-usb-remote.c | 2
-rw-r--r--  drivers/media/dvb/frontends/dib7000m.c | 2
-rw-r--r--  drivers/media/dvb/frontends/dib7000p.c | 2
-rw-r--r--  drivers/media/dvb/frontends/tda10021.c | 2
-rw-r--r--  drivers/media/dvb/frontends/ves1x93.c | 2
-rw-r--r--  drivers/media/video/em28xx/em28xx-i2c.c | 2
-rw-r--r--  drivers/media/video/em28xx/em28xx-video.c | 2
-rw-r--r--  drivers/media/video/pwc/philips.txt | 6
-rw-r--r--  drivers/media/video/usbvideo/vicam.c | 2
-rw-r--r--  drivers/message/fusion/Kconfig | 1
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 2
-rw-r--r--  drivers/message/i2o/Kconfig | 1
-rw-r--r--  drivers/mfd/Kconfig | 1
-rw-r--r--  drivers/misc/Kconfig | 2
-rw-r--r--  drivers/misc/asus-laptop.c | 66
-rw-r--r--  drivers/misc/msi-laptop.c | 12
-rw-r--r--  drivers/misc/sony-laptop.c | 8
-rw-r--r--  drivers/mmc/Kconfig | 1
-rw-r--r--  drivers/mmc/core/core.c | 4
-rw-r--r--  drivers/mtd/Kconfig | 1
-rw-r--r--  drivers/mtd/chips/Kconfig | 40
-rw-r--r--  drivers/mtd/chips/Makefile | 4
-rw-r--r--  drivers/mtd/chips/amd_flash.c | 1396
-rw-r--r--  drivers/mtd/chips/jedec.c | 935
-rw-r--r--  drivers/mtd/chips/sharp.c | 601
-rw-r--r--  drivers/mtd/devices/block2mtd.c | 2
-rw-r--r--  drivers/mtd/maps/Kconfig | 18
-rw-r--r--  drivers/mtd/maps/Makefile | 2
-rw-r--r--  drivers/mtd/maps/arctic-mtd.c | 145
-rw-r--r--  drivers/mtd/maps/beech-mtd.c | 122
-rw-r--r--  drivers/mtd/maps/nettel.c | 2
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 2
-rw-r--r--  drivers/mtd/mtdpart.c | 1
-rw-r--r--  drivers/mtd/nand/Kconfig | 21
-rw-r--r--  drivers/mtd/nand/Makefile | 2
-rw-r--r--  drivers/mtd/nand/at91_nand.c | 10
-rw-r--r--  drivers/mtd/nand/cafe_ecc.c | 1381
-rw-r--r--  drivers/mtd/nand/cafe_nand.c (renamed from drivers/mtd/nand/cafe.c) | 137
-rw-r--r--  drivers/mtd/nand/nand_base.c | 11
-rw-r--r--  drivers/mtd/nand/plat_nand.c | 150
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 2
-rw-r--r--  drivers/net/3c509.c | 5
-rw-r--r--  drivers/net/3c59x.c | 2
-rw-r--r--  drivers/net/Kconfig | 16
-rw-r--r--  drivers/net/Makefile | 9
-rw-r--r--  drivers/net/atl1/atl1_main.c | 12
-rw-r--r--  drivers/net/atp.c | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/eepro.c | 2
-rw-r--r--  drivers/net/eepro100.c | 2
-rw-r--r--  drivers/net/epic100.c | 10
-rw-r--r--  drivers/net/hamradio/Kconfig | 2
-rw-r--r--  drivers/net/irda/donauboe.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c | 2
-rw-r--r--  drivers/net/meth.h | 2
-rw-r--r--  drivers/net/mlx4/Makefile | 4
-rw-r--r--  drivers/net/mlx4/alloc.c | 179
-rw-r--r--  drivers/net/mlx4/catas.c | 70
-rw-r--r--  drivers/net/mlx4/cmd.c | 429
-rw-r--r--  drivers/net/mlx4/cq.c | 254
-rw-r--r--  drivers/net/mlx4/eq.c | 696
-rw-r--r--  drivers/net/mlx4/fw.c | 775
-rw-r--r--  drivers/net/mlx4/fw.h | 167
-rw-r--r--  drivers/net/mlx4/icm.c | 379
-rw-r--r--  drivers/net/mlx4/icm.h | 135
-rw-r--r--  drivers/net/mlx4/intf.c | 165
-rw-r--r--  drivers/net/mlx4/main.c | 936
-rw-r--r--  drivers/net/mlx4/mcg.c | 380
-rw-r--r--  drivers/net/mlx4/mlx4.h | 348
-rw-r--r--  drivers/net/mlx4/mr.c | 479
-rw-r--r--  drivers/net/mlx4/pd.c | 102
-rw-r--r--  drivers/net/mlx4/profile.c | 238
-rw-r--r--  drivers/net/mlx4/qp.c | 280
-rw-r--r--  drivers/net/mlx4/reset.c | 181
-rw-r--r--  drivers/net/mlx4/srq.c | 227
-rw-r--r--  drivers/net/natsemi.c | 1
-rw-r--r--  drivers/net/ne2k-pci.c | 3
-rw-r--r--  drivers/net/pcmcia/ibmtr_cs.c | 14
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/phy.c | 6
-rw-r--r--  drivers/net/skge.c | 4
-rw-r--r--  drivers/net/sundance.c | 3
-rw-r--r--  drivers/net/tg3.c | 11
-rw-r--r--  drivers/net/tg3.h | 2
-rw-r--r--  drivers/net/tulip/interrupt.c | 2
-rw-r--r--  drivers/net/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 2
-rw-r--r--  drivers/net/typhoon.c | 2
-rw-r--r--  drivers/net/usb/Kconfig (renamed from drivers/usb/net/Kconfig) | 0
-rw-r--r--  drivers/net/usb/Makefile (renamed from drivers/usb/net/Makefile) | 0
-rw-r--r--  drivers/net/usb/asix.c (renamed from drivers/usb/net/asix.c) | 0
-rw-r--r--  drivers/net/usb/catc.c (renamed from drivers/usb/net/catc.c) | 0
-rw-r--r--  drivers/net/usb/cdc_ether.c (renamed from drivers/usb/net/cdc_ether.c) | 0
-rw-r--r--  drivers/net/usb/cdc_subset.c (renamed from drivers/usb/net/cdc_subset.c) | 0
-rw-r--r--  drivers/net/usb/dm9601.c (renamed from drivers/usb/net/dm9601.c) | 0
-rw-r--r--  drivers/net/usb/gl620a.c (renamed from drivers/usb/net/gl620a.c) | 0
-rw-r--r--  drivers/net/usb/kaweth.c (renamed from drivers/usb/net/kaweth.c) | 0
-rw-r--r--  drivers/net/usb/kawethfw.h (renamed from drivers/usb/net/kawethfw.h) | 0
-rw-r--r--  drivers/net/usb/mcs7830.c (renamed from drivers/usb/net/mcs7830.c) | 0
-rw-r--r--  drivers/net/usb/net1080.c (renamed from drivers/usb/net/net1080.c) | 0
-rw-r--r--  drivers/net/usb/pegasus.c (renamed from drivers/usb/net/pegasus.c) | 0
-rw-r--r--  drivers/net/usb/pegasus.h (renamed from drivers/usb/net/pegasus.h) | 0
-rw-r--r--  drivers/net/usb/plusb.c (renamed from drivers/usb/net/plusb.c) | 0
-rw-r--r--  drivers/net/usb/rndis_host.c (renamed from drivers/usb/net/rndis_host.c) | 0
-rw-r--r--  drivers/net/usb/rtl8150.c (renamed from drivers/usb/net/rtl8150.c) | 0
-rw-r--r--  drivers/net/usb/usbnet.c (renamed from drivers/usb/net/usbnet.c) | 0
-rw-r--r--  drivers/net/usb/usbnet.h (renamed from drivers/usb/net/usbnet.h) | 2
-rw-r--r--  drivers/net/usb/zaurus.c (renamed from drivers/usb/net/zaurus.c) | 0
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/airport.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h | 18
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 4
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 81
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 19
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 2
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 4
-rw-r--r--  drivers/net/wireless/wavelan_cs.p.h | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 4
-rw-r--r--  drivers/net/yellowfin.c | 1
-rw-r--r--  drivers/parport/Kconfig | 1
-rw-r--r--  drivers/pci/pci-driver.c | 2
-rw-r--r--  drivers/pnp/Kconfig | 1
-rw-r--r--  drivers/rtc/Kconfig | 3
-rw-r--r--  drivers/rtc/rtc-sh.c | 8
-rw-r--r--  drivers/s390/block/Kconfig | 11
-rw-r--r--  drivers/s390/block/dasd.c | 8
-rw-r--r--  drivers/s390/block/dasd_diag.c | 10
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 6
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 4
-rw-r--r--  drivers/s390/char/Kconfig (renamed from drivers/s390/Kconfig) | 111
-rw-r--r--  drivers/s390/char/monreader.c | 14
-rw-r--r--  drivers/s390/char/raw3270.c | 5
-rw-r--r--  drivers/s390/char/sclp.h | 3
-rw-r--r--  drivers/s390/char/sclp_rw.c | 2
-rw-r--r--  drivers/s390/char/sclp_sdias.c | 8
-rw-r--r--  drivers/s390/char/zcore.c | 9
-rw-r--r--  drivers/s390/cio/css.c | 3
-rw-r--r--  drivers/s390/cio/css.h | 2
-rw-r--r--  drivers/s390/cio/device.c | 4
-rw-r--r--  drivers/s390/cio/device_ops.c | 11
-rw-r--r--  drivers/s390/cio/qdio.c | 1
-rw-r--r--  drivers/s390/net/Kconfig | 8
-rw-r--r--  drivers/s390/net/qeth_main.c | 2
-rw-r--r--  drivers/s390/net/qeth_mpc.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 2
-rw-r--r--  drivers/sbus/char/bpp.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 2
-rw-r--r--  drivers/scsi/aic94xx/Makefile | 2
-rw-r--r--  drivers/scsi/dc395x.c | 6
-rw-r--r--  drivers/scsi/ide-scsi.c | 30
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/serial/Kconfig | 1
-rw-r--r--  drivers/spi/Kconfig | 1
-rw-r--r--  drivers/spi/atmel_spi.c | 5
-rw-r--r--  drivers/telephony/Kconfig | 1
-rw-r--r--  drivers/usb/Kconfig | 3
-rw-r--r--  drivers/usb/Makefile | 7
-rw-r--r--  drivers/usb/atm/usbatm.c | 2
-rw-r--r--  drivers/usb/misc/auerswald.c | 2
-rw-r--r--  drivers/usb/serial/Kconfig | 2
-rw-r--r--  drivers/usb/serial/aircable.c | 4
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 4
-rw-r--r--  drivers/video/Kconfig | 29
-rw-r--r--  drivers/video/Makefile | 2
-rw-r--r--  drivers/video/arkfb.c | 1200
-rw-r--r--  drivers/video/console/softcursor.c | 2
-rw-r--r--  drivers/video/fbmem.c | 4
-rw-r--r--  drivers/video/i810/i810_main.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_Ti3026.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_accel.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_base.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_misc.c | 2
-rw-r--r--  drivers/video/nvidia/nv_hw.c | 7
-rw-r--r--  drivers/video/nvidia/nvidia.c | 1
-rw-r--r--  drivers/video/s3fb.c | 19
-rw-r--r--  drivers/video/skeletonfb.c | 2
-rw-r--r--  drivers/video/svgalib.c | 17
-rw-r--r--  drivers/video/vt8623fb.c | 927
-rw-r--r--  drivers/w1/Kconfig | 1
422 files changed, 24690 insertions, 7901 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 26ca9031ea4..adad2f3d438 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_FC4) += fc4/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_FUSION) += message/
+obj-$(CONFIG_FIREWIRE) += firewire/
obj-$(CONFIG_IEEE1394) += ieee1394/
obj-y += cdrom/
obj-y += auxdisplay/
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 1683e5c5b94..1cbe6190582 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -231,8 +231,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
* Obtain the method mutex if necessary. Do not acquire mutex for a
* recursive call.
*/
- if (acpi_os_get_thread_id() !=
- obj_desc->method.mutex->mutex.owner_thread_id) {
+ if (!walk_state ||
+ !obj_desc->method.mutex->mutex.owner_thread ||
+ (walk_state->thread !=
+ obj_desc->method.mutex->mutex.owner_thread)) {
/*
* Acquire the method mutex. This releases the interpreter if we
* block (and reacquires it before it returns)
@@ -246,14 +248,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
}
/* Update the mutex and walk info and save the original sync_level */
- obj_desc->method.mutex->mutex.owner_thread_id =
- acpi_os_get_thread_id();
if (walk_state) {
obj_desc->method.mutex->mutex.
original_sync_level =
walk_state->thread->current_sync_level;
+ obj_desc->method.mutex->mutex.owner_thread =
+ walk_state->thread;
walk_state->thread->current_sync_level =
obj_desc->method.sync_level;
} else {
@@ -567,7 +569,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_os_release_mutex(method_desc->method.mutex->mutex.
os_mutex);
- method_desc->method.mutex->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+ method_desc->method.mutex->mutex.owner_thread = NULL;
}
}
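
The dsmethod.c hunks above stop comparing raw OS thread ids and instead track a serialized method's owner through the walk state's thread pointer, so a recursive call by the owning thread skips re-acquiring the method mutex. A minimal userspace sketch of that ownership test follows; struct acpi_thread and struct method_mutex are hypothetical stand-ins for the ACPICA types, not the kernel definitions:

    struct acpi_thread;                       /* stand-in for acpi_thread_state */

    struct method_mutex {
            struct acpi_thread *owner_thread; /* NULL while unowned */
            unsigned int acquisition_depth;
    };

    /* Acquire only when the current walk's thread is not already the
     * owner; a NULL walk thread (external caller) always acquires. */
    static int must_acquire(struct method_mutex *m,
                            struct acpi_thread *walk_thread)
    {
            return !walk_thread || !m->owner_thread ||
                   walk_thread != m->owner_thread;
    }
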
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 6c6104a7a24..fc9da4879cb 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -866,8 +866,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
(op->common.parent->common.aml_opcode !=
AML_VAR_PACKAGE_OP)
- && (op->common.parent->common.aml_opcode !=
- AML_NAME_OP))) {
+ && (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
walk_state->result_obj = obj_desc;
}
}
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index e4073e05a75..71503c036f7 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -556,10 +556,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
* indicate this to the interpreter, set the
* object to the root
*/
- obj_desc =
- ACPI_CAST_PTR(union
- acpi_operand_object,
- acpi_gbl_root_node);
+ obj_desc = ACPI_CAST_PTR(union
+ acpi_operand_object,
+ acpi_gbl_root_node);
status = AE_OK;
} else {
/*
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index 16c8e38b51e..5afcdd9c744 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -630,12 +630,9 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
*
******************************************************************************/
-struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
- union acpi_parse_object
- *origin,
- union acpi_operand_object
- *method_desc,
- struct acpi_thread_state
+struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
+ *origin, union acpi_operand_object
+ *method_desc, struct acpi_thread_state
*thread)
{
struct acpi_walk_state *walk_state;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e08cf98f504..82f496c0767 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -147,9 +147,10 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event,
return 0;
}
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
+static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event,
+ unsigned count, int force_poll)
{
- if (acpi_ec_mode == EC_POLL) {
+ if (unlikely(force_poll) || acpi_ec_mode == EC_POLL) {
unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
while (time_before(jiffies, delay)) {
if (acpi_ec_check_status(ec, event, 0))
@@ -173,14 +174,15 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len)
+ u8 * rdata, unsigned rdata_len,
+ int force_poll)
{
int result = 0;
unsigned count = atomic_read(&ec->event_count);
acpi_ec_write_cmd(ec, command);
for (; wdata_len > 0; --wdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
+ result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
if (result) {
printk(KERN_ERR PREFIX
"write_cmd timeout, command = %d\n", command);
@@ -191,7 +193,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
}
if (!rdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
+ result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
if (result) {
printk(KERN_ERR PREFIX
"finish-write timeout, command = %d\n", command);
@@ -202,7 +204,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
}
for (; rdata_len > 0; --rdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count);
+ result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count, force_poll);
if (result) {
printk(KERN_ERR PREFIX "read timeout, command = %d\n",
command);
@@ -217,7 +219,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len)
+ u8 * rdata, unsigned rdata_len,
+ int force_poll)
{
int status;
u32 glk;
@@ -240,7 +243,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
/* Make sure GPE is enabled before doing transaction */
acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
- status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
+ status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0, 0);
if (status) {
printk(KERN_DEBUG PREFIX
"input buffer is not empty, aborting transaction\n");
@@ -249,7 +252,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
status = acpi_ec_transaction_unlocked(ec, command,
wdata, wdata_len,
- rdata, rdata_len);
+ rdata, rdata_len,
+ force_poll);
end:
@@ -267,12 +271,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
int acpi_ec_burst_enable(struct acpi_ec *ec)
{
u8 d;
- return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1);
+ return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0);
}
int acpi_ec_burst_disable(struct acpi_ec *ec)
{
- return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0);
+ return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0);
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
@@ -281,7 +285,7 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
u8 d;
result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ,
- &address, 1, &d, 1);
+ &address, 1, &d, 1, 0);
*data = d;
return result;
}
@@ -290,7 +294,7 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE,
- wdata, 2, NULL, 0);
+ wdata, 2, NULL, 0, 0);
}
/*
@@ -349,13 +353,15 @@ EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len)
+ u8 * rdata, unsigned rdata_len,
+ int force_poll)
{
if (!first_ec)
return -ENODEV;
return acpi_ec_transaction(first_ec, command, wdata,
- wdata_len, rdata, rdata_len);
+ wdata_len, rdata, rdata_len,
+ force_poll);
}
EXPORT_SYMBOL(ec_transaction);
@@ -374,7 +380,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
* bit to be cleared (and thus clearing the interrupt source).
*/
- result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1);
+ result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0);
if (result)
return result;
@@ -410,6 +416,7 @@ static u32 acpi_ec_gpe_handler(void *data)
acpi_status status = AE_OK;
u8 value;
struct acpi_ec *ec = data;
+
atomic_inc(&ec->event_count);
if (acpi_ec_mode == EC_INTR) {
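
The ec.c hunks above thread a force_poll flag from ec_transaction() all the way down to acpi_ec_wait(), so a single transaction can insist on polling even while the driver runs in interrupt mode. A compilable sketch of that branch, assuming ec_poll_status() and ec_wait_event() as hypothetical stand-ins for the real status-register check and GPE wait:

    #include <stdbool.h>

    enum ec_mode { EC_POLL, EC_INTR };

    static bool ec_poll_status(void) { return true; }  /* stub: status ready */
    static bool ec_wait_event(void)  { return true; }  /* stub: GPE fired */

    static int ec_wait(enum ec_mode mode, int force_poll)
    {
            if (force_poll || mode == EC_POLL) {
                    /* bounded busy-poll, as in the EC_POLL path */
                    for (int i = 0; i < 1000; i++)
                            if (ec_poll_status())
                                    return 0;
                    return -1;              /* timeout */
            }
            /* interrupt mode: sleep until the GPE handler signals us */
            return ec_wait_event() ? 0 : -1;
    }
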
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index 635ba449ebc..e22f4a973c0 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -341,9 +341,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
/* A Non-NULL gpe_device means this is a GPE Block Device */
- obj_desc =
- acpi_ns_get_attached_object((struct acpi_namespace_node *)
- gpe_device);
+ obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
+ gpe_device);
if (!obj_desc || !obj_desc->device.gpe_block) {
return (NULL);
}
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index ad5bc76edf4..902c287b3a4 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -1033,8 +1033,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_METHOD)
- && (gpe_event_info->
- flags & ACPI_GPE_TYPE_RUNTIME)) {
+ && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
gpe_enabled_count++;
}
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index cae786ca860..21cb749d0c7 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -196,15 +196,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
notify_info->notify.value = (u16) notify_value;
notify_info->notify.handler_obj = handler_obj;
- acpi_ex_exit_interpreter();
-
- acpi_ev_notify_dispatch(notify_info);
-
- status = acpi_ex_enter_interpreter();
+ status =
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+ notify_info);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ acpi_ut_delete_generic_state(notify_info);
}
-
}
if (!handler_obj) {
@@ -323,8 +320,9 @@ static u32 acpi_ev_global_lock_handler(void *context)
acpi_gbl_global_lock_acquired = TRUE;
/* Send a unit to the semaphore */
- if (ACPI_FAILURE(acpi_os_signal_semaphore(
- acpi_gbl_global_lock_semaphore, 1))) {
+ if (ACPI_FAILURE
+ (acpi_os_signal_semaphore
+ (acpi_gbl_global_lock_semaphore, 1))) {
ACPI_ERROR((AE_INFO,
"Could not signal Global Lock semaphore"));
}
@@ -450,7 +448,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
}
if (ACPI_FAILURE(status)) {
- status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
+ status =
+ acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex,
+ timeout);
}
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 96b0e843174..e99f0c435a4 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 bit_width, acpi_integer * value)
{
acpi_status status;
- acpi_status status2;
acpi_adr_space_handler handler;
acpi_adr_space_setup region_setup;
union acpi_operand_object *handler_desc;
@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* setup will potentially execute control methods
* (e.g., _REG method for this region)
*/
- acpi_ex_exit_interpreter();
+ acpi_ex_relinquish_interpreter();
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
handler_desc->address_space.context,
@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Re-enter the interpreter */
- status2 = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
+ acpi_ex_reacquire_interpreter();
/* Check for failure of the Region Setup */
@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* exit the interpreter because the handler *might* block -- we don't
* know what it will do, so we can't hold the lock on the interpreter.
*/
- acpi_ex_exit_interpreter();
+ acpi_ex_relinquish_interpreter();
}
/* Call the handler */
@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* We just returned from a non-default handler, we must re-enter the
* interpreter
*/
- status2 = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
+ acpi_ex_reacquire_interpreter();
}
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index a4fa7e6822a..400d90fca96 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -228,7 +228,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Install a handler for this PCI root bridge */
- status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+ status =
+ acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_SAME_HANDLER) {
/*
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index a3379bafa67..6d866a01f5f 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -91,7 +91,6 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#endif /* ACPI_FUTURE_USAGE */
-
/*******************************************************************************
*
* FUNCTION: acpi_install_fixed_event_handler
@@ -768,11 +767,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
return (AE_BAD_PARAMETER);
}
- status = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status)) {
- return (status);
- }
+ /* Must lock interpreter to prevent race conditions */
+ acpi_ex_enter_interpreter();
status = acpi_ev_acquire_global_lock(timeout);
acpi_ex_exit_interpreter();
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 17065e98807..9cbd3414a57 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -472,7 +472,6 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
}
ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
-
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -568,7 +567,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
#endif /* ACPI_FUTURE_USAGE */
-
/*******************************************************************************
*
* FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index d470e8b1f4e..79f2c0d42c0 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -512,9 +512,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* Create a new string object and string buffer
* (-1 because of extra separator included in string_length from above)
*/
- return_desc =
- acpi_ut_create_string_object((acpi_size)
- (string_length - 1));
+ return_desc = acpi_ut_create_string_object((acpi_size)
+ (string_length - 1));
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index ae97812681a..6e9a23e47fe 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -50,7 +50,6 @@
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("excreate")
-
#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
@@ -583,10 +582,7 @@ acpi_ex_create_method(u8 * aml_start,
* Get the sync_level. If method is serialized, a mutex will be
* created for this method when it is parsed.
*/
- if (acpi_gbl_all_methods_serialized) {
- obj_desc->method.sync_level = 0;
- obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
- } else if (method_flags & AML_METHOD_SERIALIZED) {
+ if (method_flags & AML_METHOD_SERIALIZED) {
/*
* ACPI 1.0: sync_level = 0
* ACPI 2.0: sync_level = sync_level in method declaration
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 1a73c14df2c..51c9c29987c 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -134,7 +134,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[8] = {
static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread_id), "Owner Thread"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
"Acquire Depth"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
@@ -451,9 +451,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
ACPI_FUNCTION_NAME(ex_dump_operand)
- if (!
- ((ACPI_LV_EXEC & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+ if (!((ACPI_LV_EXEC & acpi_dbg_level)
+ && (_COMPONENT & acpi_dbg_layer))) {
return;
}
@@ -844,9 +843,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
ACPI_FUNCTION_ENTRY();
if (!flags) {
- if (!
- ((ACPI_LV_OBJECTS & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+ if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+ && (_COMPONENT & acpi_dbg_layer))) {
return;
}
}
@@ -1011,9 +1009,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
}
if (!flags) {
- if (!
- ((ACPI_LV_OBJECTS & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+ if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+ && (_COMPONENT & acpi_dbg_layer))) {
return_VOID;
}
}
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index 4eb883bda6a..6748e3ef099 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -66,9 +66,10 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
******************************************************************************/
-void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc,
- struct acpi_thread_state *thread)
+void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
{
+ struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
+
if (!thread) {
return;
}
@@ -173,13 +174,16 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Support for multiple acquires by the owning thread */
- if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) {
- /*
- * The mutex is already owned by this thread, just increment the
- * acquisition depth
- */
- obj_desc->mutex.acquisition_depth++;
- return_ACPI_STATUS(AE_OK);
+ if (obj_desc->mutex.owner_thread) {
+ if (obj_desc->mutex.owner_thread->thread_id ==
+ walk_state->thread->thread_id) {
+ /*
+ * The mutex is already owned by this thread, just increment the
+ * acquisition depth
+ */
+ obj_desc->mutex.acquisition_depth++;
+ return_ACPI_STATUS(AE_OK);
+ }
}
/* Acquire the mutex, wait if necessary. Special case for Global Lock */
@@ -202,7 +206,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Have the mutex: update mutex and walk info and save the sync_level */
- obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id();
+ obj_desc->mutex.owner_thread = walk_state->thread;
obj_desc->mutex.acquisition_depth = 1;
obj_desc->mutex.original_sync_level =
walk_state->thread->current_sync_level;
@@ -242,7 +246,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* The mutex must have been previously acquired in order to release it */
- if (!obj_desc->mutex.owner_thread_id) {
+ if (!obj_desc->mutex.owner_thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -262,14 +266,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
*/
- if ((obj_desc->mutex.owner_thread_id !=
+ if ((obj_desc->mutex.owner_thread->thread_id !=
walk_state->thread->thread_id)
&& (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
"Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
(unsigned long)walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
- (unsigned long)obj_desc->mutex.owner_thread_id));
+ (unsigned long)obj_desc->mutex.owner_thread->
+ thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
@@ -296,7 +301,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* Unlink the mutex from the owner's list */
- acpi_ex_unlink_mutex(obj_desc, walk_state->thread);
+ acpi_ex_unlink_mutex(obj_desc);
/* Release the mutex, special case for Global Lock */
@@ -308,7 +313,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* Update the mutex and restore sync_level */
- obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+ obj_desc->mutex.owner_thread = NULL;
walk_state->thread->current_sync_level =
obj_desc->mutex.original_sync_level;
@@ -363,7 +368,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
/* Mark mutex unowned */
- obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+ obj_desc->mutex.owner_thread = NULL;
/* Update Thread sync_level (Last mutex is the important one) */
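
The exmutex.c hunks above key AML Acquire/Release on the owning thread state rather than a thread id: a recursive Acquire by the owner only bumps acquisition_depth, and Release fails unless the caller owns the mutex. A minimal model under those rules; struct thread_state and the error codes here are illustrative, not the ACPICA definitions:

    #include <stddef.h>

    struct thread_state { unsigned long thread_id; };

    struct aml_mutex {
            struct thread_state *owner_thread;   /* NULL = unowned */
            unsigned short acquisition_depth;
    };

    static int aml_acquire(struct aml_mutex *m, struct thread_state *t)
    {
            if (m->owner_thread &&
                m->owner_thread->thread_id == t->thread_id) {
                    m->acquisition_depth++;      /* recursive acquire */
                    return 0;
            }
            /* ... really take the OS mutex here ... */
            m->owner_thread = t;
            m->acquisition_depth = 1;
            return 0;
    }

    static int aml_release(struct aml_mutex *m, struct thread_state *t)
    {
            if (!m->owner_thread)
                    return -1;                   /* never acquired */
            if (m->owner_thread->thread_id != t->thread_id)
                    return -2;                   /* AE_AML_NOT_OWNER analogue */
            if (--m->acquisition_depth)
                    return 0;                    /* still held by owner */
            m->owner_thread = NULL;              /* ... drop the OS mutex ... */
            return 0;
    }
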
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index 1ee4fb1175c..308eae52dc0 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -177,8 +177,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
- for (index = 0;
- (index < ACPI_NAME_SIZE)
+ for (index = 0; (index < ACPI_NAME_SIZE)
&& (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index a6696621ff1..efe5d4b461a 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -242,7 +242,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
obj_desc->common_field.bit_length,
0xFFFFFFFF
/* Temp until we pass region_length as parameter */
- );
+ );
bit_length = byte_alignment * 8;
#endif
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index ba761862a59..09d897b3f6d 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -354,8 +354,7 @@ acpi_ex_resolve_operands(u16 opcode,
if ((opcode == AML_STORE_OP) &&
(ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
ACPI_TYPE_LOCAL_REFERENCE)
- && ((*stack_ptr)->reference.opcode ==
- AML_INDEX_OP)) {
+ && ((*stack_ptr)->reference.opcode == AML_INDEX_OP)) {
goto next_operand;
}
break;
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index b2edf620ba8..9460baff303 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
{
acpi_status status;
- acpi_status status2;
ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
/* We must wait, so unlock the interpreter */
- acpi_ex_exit_interpreter();
+ acpi_ex_relinquish_interpreter();
status = acpi_os_wait_semaphore(semaphore, 1, timeout);
@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
/* Reacquire the interpreter */
- status2 = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status2)) {
-
- /* Report fatal error, could not acquire interpreter */
-
- return_ACPI_STATUS(status2);
- }
+ acpi_ex_reacquire_interpreter();
}
return_ACPI_STATUS(status);
@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
{
acpi_status status;
- acpi_status status2;
ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
/* We must wait, so unlock the interpreter */
- acpi_ex_exit_interpreter();
+ acpi_ex_relinquish_interpreter();
status = acpi_os_acquire_mutex(mutex, timeout);
@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
/* Reacquire the interpreter */
- status2 = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status2)) {
-
- /* Report fatal error, could not acquire interpreter */
-
- return_ACPI_STATUS(status2);
- }
+ acpi_ex_reacquire_interpreter();
}
return_ACPI_STATUS(status);
@@ -209,20 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
{
- acpi_status status;
-
ACPI_FUNCTION_ENTRY();
/* Since this thread will sleep, we must release the interpreter */
- acpi_ex_exit_interpreter();
+ acpi_ex_relinquish_interpreter();
acpi_os_sleep(how_long);
/* And now we must get the interpreter again */
- status = acpi_ex_enter_interpreter();
- return (status);
+ acpi_ex_reacquire_interpreter();
+ return (AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index aea461f3a48..6b0aeccbb69 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
*
* PARAMETERS: None
*
- * RETURN: Status
+ * RETURN: None
*
- * DESCRIPTION: Enter the interpreter execution region. Failure to enter
- * the interpreter region is a fatal system error
+ * DESCRIPTION: Enter the interpreter execution region. Failure to enter
+ * the interpreter region is a fatal system error. Used in
+ * conjunction with exit_interpreter.
*
******************************************************************************/
-acpi_status acpi_ex_enter_interpreter(void)
+void acpi_ex_enter_interpreter(void)
{
acpi_status status;
@@ -91,31 +92,55 @@ acpi_status acpi_ex_enter_interpreter(void)
status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
+ ACPI_ERROR((AE_INFO,
+ "Could not acquire AML Interpreter mutex"));
}
- return_ACPI_STATUS(status);
+ return_VOID;
}
/*******************************************************************************
*
- * FUNCTION: acpi_ex_exit_interpreter
+ * FUNCTION: acpi_ex_reacquire_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
- * DESCRIPTION: Exit the interpreter execution region
+ * DESCRIPTION: Reacquire the interpreter execution region from within the
+ * interpreter code. Failure to enter the interpreter region is a
+ * fatal system error. Used in conjunction with
+ * relinquish_interpreter
+ *
+ ******************************************************************************/
+
+void acpi_ex_reacquire_interpreter(void)
+{
+ ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
+
+ /*
+ * If the global serialized flag is set, do not release the interpreter,
+ * since it was not actually released by acpi_ex_relinquish_interpreter.
+ * This forces the interpreter to be single threaded.
+ */
+ if (!acpi_gbl_all_methods_serialized) {
+ acpi_ex_enter_interpreter();
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_exit_interpreter
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
*
- * Cases where the interpreter is unlocked:
- * 1) Completion of the execution of a control method
- * 2) Method blocked on a Sleep() AML opcode
- * 3) Method blocked on an Acquire() AML opcode
- * 4) Method blocked on a Wait() AML opcode
- * 5) Method blocked to acquire the global lock
- * 6) Method blocked to execute a serialized control method that is
- * already executing
- * 7) About to invoke a user-installed opregion handler
+ * DESCRIPTION: Exit the interpreter execution region. This is the top level
+ * routine used to exit the interpreter when all processing has
+ * been completed.
*
******************************************************************************/
@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)
status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
+ ACPI_ERROR((AE_INFO,
+ "Could not release AML Interpreter mutex"));
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_relinquish_interpreter
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Exit the interpreter execution region, from within the
+ * interpreter - before attempting an operation that will possibly
+ * block the running thread.
+ *
+ * Cases where the interpreter is unlocked internally
+ * 1) Method to be blocked on a Sleep() AML opcode
+ * 2) Method to be blocked on an Acquire() AML opcode
+ * 3) Method to be blocked on a Wait() AML opcode
+ * 4) Method to be blocked to acquire the global lock
+ * 5) Method to be blocked waiting to execute a serialized control method
+ * that is currently executing
+ * 6) About to invoke a user-installed opregion handler
+ *
+ ******************************************************************************/
+
+void acpi_ex_relinquish_interpreter(void)
+{
+ ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
+
+ /*
+ * If the global serialized flag is set, do not release the interpreter.
+ * This forces the interpreter to be single threaded.
+ */
+ if (!acpi_gbl_all_methods_serialized) {
+ acpi_ex_exit_interpreter();
}
return_VOID;
@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
*
* RETURN: none
*
- * DESCRIPTION: Truncate a number to 32-bits if the currently executing method
- * belongs to a 32-bit ACPI table.
+ * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
+ * 32-bit, as determined by the revision of the DSDT.
*
******************************************************************************/
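
The new relinquish/reacquire pair documented above exists so that blocking operations (Sleep, Acquire, Wait, opregion handlers) can drop the interpreter lock, except when acpi_gbl_all_methods_serialized forces single-threaded execution, in which case both calls become no-ops and enter/exit stay balanced. A self-contained pthread sketch of that contract; the names are illustrative, not the kernel API:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t interp_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool all_methods_serialized;   /* analogue of the global flag */

    static void enter_interpreter(void) { pthread_mutex_lock(&interp_mutex); }
    static void exit_interpreter(void)  { pthread_mutex_unlock(&interp_mutex); }

    /* Called from inside the interpreter before a blocking operation:
     * skip the unlock when serialized, keeping the lock depth balanced. */
    static void relinquish_interpreter(void)
    {
            if (!all_methods_serialized)
                    exit_interpreter();
    }

    static void reacquire_interpreter(void)
    {
            if (!all_methods_serialized)
                    enter_interpreter();
    }
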
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index c84b1faba28..76c525dc590 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -152,7 +152,6 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state_prep
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 26fd0dd6953..97b2ac57c16 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -75,7 +75,7 @@ ACPI_MODULE_NAME("nseval")
* MUTEX: Locks interpreter
*
******************************************************************************/
-acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
{
acpi_status status;
@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
* Execute the method via the interpreter. The interpreter is locked
* here before calling into the AML parser
*/
- status = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
+ acpi_ex_enter_interpreter();
status = acpi_ps_execute_method(info);
acpi_ex_exit_interpreter();
} else {
@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
* resolution, we must lock it because we could access an opregion.
* The opregion access code assumes that the interpreter is locked.
*/
- status = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ acpi_ex_enter_interpreter();
/* Function has a strange interface */
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index c4ab615f77f..33db2241044 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -214,7 +214,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
acpi_object_type type;
- acpi_status status;
+ acpi_status status = AE_OK;
struct acpi_init_walk_info *info =
(struct acpi_init_walk_info *)context;
struct acpi_namespace_node *node =
@@ -268,10 +268,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
/*
* Must lock the interpreter before executing AML code
*/
- status = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status)) {
- return (status);
- }
+ acpi_ex_enter_interpreter();
/*
* Each of these types can contain executable AML code within the
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index 94eb8f332d9..280b8357c46 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -65,10 +65,8 @@ ACPI_MODULE_NAME("nswalk")
* within Scope is returned.
*
******************************************************************************/
-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
- struct acpi_namespace_node
- *parent_node,
- struct acpi_namespace_node
+struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
+ *parent_node, struct acpi_namespace_node
*child_node)
{
struct acpi_namespace_node *next_node = NULL;
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index 8904d0fae6a..be4f2899de7 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -48,7 +48,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfeval")
-
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -73,8 +72,8 @@ ACPI_MODULE_NAME("nsxfeval")
acpi_status
acpi_evaluate_object_typed(acpi_handle handle,
acpi_string pathname,
- struct acpi_object_list * external_params,
- struct acpi_buffer * return_buffer,
+ struct acpi_object_list *external_params,
+ struct acpi_buffer *return_buffer,
acpi_object_type return_type)
{
acpi_status status;
@@ -143,7 +142,6 @@ acpi_evaluate_object_typed(acpi_handle handle,
ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
#endif /* ACPI_FUTURE_USAGE */
-
/*******************************************************************************
*
* FUNCTION: acpi_evaluate_object
@@ -170,7 +168,6 @@ acpi_evaluate_object(acpi_handle handle,
struct acpi_buffer *return_buffer)
{
acpi_status status;
- acpi_status status2;
struct acpi_evaluate_info *info;
acpi_size buffer_space_needed;
u32 i;
@@ -329,14 +326,12 @@ acpi_evaluate_object(acpi_handle handle,
* Delete the internal return object. NOTE: Interpreter must be
* locked to avoid race condition.
*/
- status2 = acpi_ex_enter_interpreter();
- if (ACPI_SUCCESS(status2)) {
+ acpi_ex_enter_interpreter();
- /* Remove one reference on the return object (should delete it) */
+ /* Remove one reference on the return object (should delete it) */
- acpi_ut_remove_reference(info->return_object);
- acpi_ex_exit_interpreter();
- }
+ acpi_ut_remove_reference(info->return_object);
+ acpi_ex_exit_interpreter();
}
cleanup:
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c2bed56915e..b998340e23d 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -71,6 +71,7 @@ static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
+static struct workqueue_struct *kacpi_notify_wq;
static void __init acpi_request_region (struct acpi_generic_address *addr,
unsigned int length, char *desc)
@@ -137,8 +138,9 @@ acpi_status acpi_os_initialize1(void)
return AE_NULL_ENTRY;
}
kacpid_wq = create_singlethread_workqueue("kacpid");
+ kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
BUG_ON(!kacpid_wq);
-
+ BUG_ON(!kacpi_notify_wq);
return AE_OK;
}
@@ -150,6 +152,7 @@ acpi_status acpi_os_terminate(void)
}
destroy_workqueue(kacpid_wq);
+ destroy_workqueue(kacpi_notify_wq);
return AE_OK;
}
@@ -603,6 +606,23 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
+ if (!dpc) {
+ printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+ return;
+ }
+
+ dpc->function(dpc->context);
+ kfree(dpc);
+
+ /* Yield cpu to notify thread */
+ cond_resched();
+
+ return;
+}
+
+static void acpi_os_execute_notify(struct work_struct *work)
+{
+ struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
if (!dpc) {
printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
@@ -637,14 +657,12 @@ acpi_status acpi_os_execute(acpi_execute_type type,
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
- ACPI_FUNCTION_TRACE("os_queue_for_execution");
-
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
if (!function)
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ return AE_BAD_PARAMETER;
/*
* Allocate/initialize DPC structure. Note that this memory will be
@@ -662,14 +680,21 @@ acpi_status acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- if (!queue_work(kacpid_wq, &dpc->work)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ if (type == OSL_NOTIFY_HANDLER) {
+ INIT_WORK(&dpc->work, acpi_os_execute_notify);
+ if (!queue_work(kacpi_notify_wq, &dpc->work)) {
+ status = AE_ERROR;
+ kfree(dpc);
+ }
+ } else {
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+ if (!queue_work(kacpid_wq, &dpc->work)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Call to queue_work() failed.\n"));
- kfree(dpc);
- status = AE_ERROR;
+ status = AE_ERROR;
+ kfree(dpc);
+ }
}
-
return_ACPI_STATUS(status);
}
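
The osl.c hunks above keep two single-threaded workqueues and route OSL_NOTIFY_HANDLER work to its own kacpi_notify queue, so a notify handler that itself defers ACPI work cannot deadlock behind kacpid. A runnable userspace model of the dispatch; queue_on() merely simulates queue_work(), and the enum values are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    typedef void (*dpc_fn)(void *ctx);

    enum execute_type { OSL_HANDLER, OSL_NOTIFY_HANDLER }; /* illustrative */

    struct dpc { dpc_fn function; void *context; };

    /* stand-in for queue_work(): run synchronously, report the queue */
    static int queue_on(const char *wq, struct dpc *d)
    {
            printf("queued on %s\n", wq);
            d->function(d->context);
            free(d);
            return 0;
    }

    static int os_execute(enum execute_type type, dpc_fn fn, void *ctx)
    {
            struct dpc *d;

            if (!fn)
                    return -1;              /* AE_BAD_PARAMETER analogue */
            d = malloc(sizeof(*d));
            if (!d)
                    return -1;
            d->function = fn;
            d->context = ctx;
            return queue_on(type == OSL_NOTIFY_HANDLER ?
                            "kacpi_notify" : "kacpid", d);
    }
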
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 16d8b6cc3c2..9296e86761d 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -185,459 +185,453 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */
/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER,
- AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+ AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER,
- AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+ AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP,
- ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_SIMPLE,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY,
- AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP,
- ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
- AML_TYPE_LITERAL, AML_CONSTANT),
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP,
- ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
- AML_TYPE_LITERAL, AML_CONSTANT),
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP,
- ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
- AML_TYPE_LITERAL, AML_CONSTANT),
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP,
- ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
- AML_TYPE_LITERAL, AML_CONSTANT),
+ ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
- ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_NO_OBJ,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_NO_OBJ,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP,
- ACPI_TYPE_BUFFER, AML_CLASS_CREATE,
- AML_TYPE_CREATE_OBJECT,
- AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
+ ACPI_TYPE_BUFFER, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_OBJECT,
+ AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP,
- ACPI_TYPE_PACKAGE, AML_CLASS_CREATE,
- AML_TYPE_CREATE_OBJECT,
- AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
+ ACPI_TYPE_PACKAGE, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_OBJECT,
+ AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP,
- ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_COMPLEX,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED | AML_DEFER),
+ ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_COMPLEX,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED | AML_DEFER),
/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LOCAL_VARIABLE, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LOCAL_VARIABLE, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LOCAL_VARIABLE, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LOCAL_VARIABLE, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LOCAL_VARIABLE, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LOCAL_VARIABLE, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LOCAL_VARIABLE, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LOCAL_VARIABLE, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LOCAL_VARIABLE, 0),
/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_METHOD_ARGUMENT, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_METHOD_ARGUMENT, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_METHOD_ARGUMENT, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_METHOD_ARGUMENT, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_METHOD_ARGUMENT, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_METHOD_ARGUMENT, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_METHOD_ARGUMENT, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_METHOD_ARGUMENT, 0),
/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R),
/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
- AML_FLAGS_EXEC_1A_0T_1R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R),
/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_0T_1R,
- AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_0T_1R,
- AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_2T_1R,
- AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_2T_1R,
+ AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT),
/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP,
- ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP,
- ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R),
/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R),
/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_0T_1R,
- AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R),
/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R,
- AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R,
+ AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT),
/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP,
- ARGI_CREATE_DWORD_FIELD_OP,
- ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
- AML_TYPE_CREATE_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
- AML_DEFER | AML_CREATE),
+ ARGI_CREATE_DWORD_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP,
- ARGI_CREATE_WORD_FIELD_OP,
- ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
- AML_TYPE_CREATE_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
- AML_DEFER | AML_CREATE),
+ ARGI_CREATE_WORD_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP,
- ARGI_CREATE_BYTE_FIELD_OP,
- ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
- AML_TYPE_CREATE_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
- AML_DEFER | AML_CREATE),
+ ARGI_CREATE_BYTE_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP,
- ARGI_CREATE_BIT_FIELD_OP,
- ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
- AML_TYPE_CREATE_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
- AML_DEFER | AML_CREATE),
+ ARGI_CREATE_BIT_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_0T_1R,
- AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC |
- AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC |
- AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
- AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
+ AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY,
- AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY,
- AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY,
- AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY,
- AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP,
- ACPI_TYPE_ANY, AML_CLASS_CONTROL,
- AML_TYPE_CONTROL, AML_HAS_ARGS),
+ ACPI_TYPE_ANY, AML_CLASS_CONTROL,
+ AML_TYPE_CONTROL, AML_HAS_ARGS),
/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY,
- AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+ AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP,
- ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+ ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER,
- AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+ AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
/* Prefixed opcodes (Two-byte opcodes with a prefix op) */
/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX,
- AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT,
- AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
- AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
+ AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP,
- ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD,
- AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
- AML_DEFER | AML_FIELD | AML_CREATE),
+ ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD,
+ AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_FIELD | AML_CREATE),
/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R,
- AML_FLAGS_EXEC_1A_1T_0R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R,
+ AML_FLAGS_EXEC_1A_1T_0R),
/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
- AML_FLAGS_EXEC_1A_0T_0R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+ AML_FLAGS_EXEC_1A_0T_0R),
/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
- AML_FLAGS_EXEC_1A_0T_0R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+ AML_FLAGS_EXEC_1A_0T_0R),
/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R),
/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+ AML_FLAGS_EXEC_2A_0T_1R),
/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
- AML_FLAGS_EXEC_1A_0T_0R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+ AML_FLAGS_EXEC_1A_0T_0R),
/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP,
- ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
- AML_TYPE_CONSTANT, 0),
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_CONSTANT, 0),
/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_CONSTANT, 0),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_CONSTANT, 0),
/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R,
- AML_FLAGS_EXEC_3A_0T_0R),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R,
+ AML_FLAGS_EXEC_3A_0T_0R),
/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP,
- ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_COMPLEX,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED | AML_DEFER),
+ ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_COMPLEX,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED | AML_DEFER),
/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
- AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_FIELD),
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
- ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_NO_OBJ,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_NO_OBJ,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP,
- ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_SIMPLE,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP,
- ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_SIMPLE,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP,
- ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL,
- AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
- ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_FIELD),
+ ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
- ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_FIELD),
+ ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
/* Internal opcodes that map to invalid AML opcodes */
/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP,
- ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
- AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+ AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP,
- ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
- AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+ AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP,
- ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY,
- AML_CLASS_INTERNAL, AML_TYPE_BOGUS,
- AML_HAS_ARGS | AML_CONSTANT),
+ ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS,
+ AML_HAS_ARGS | AML_CONSTANT),
/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP,
- ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
- AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE),
+ ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE),
/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP,
- ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL,
- AML_TYPE_METHOD_CALL,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE),
+ ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL,
+ AML_TYPE_METHOD_CALL,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE),
/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP,
- ACPI_TYPE_ANY, AML_CLASS_ARGUMENT,
- AML_TYPE_LITERAL, 0),
+ ACPI_TYPE_ANY, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, 0),
/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP,
- ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY,
- AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+ ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP,
- ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
- AML_TYPE_BOGUS,
- AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
+ ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+ AML_TYPE_BOGUS,
+ AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP,
- ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY,
- AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+ ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP,
- ARGI_STATICSTRING_OP, ACPI_TYPE_ANY,
- AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+ ARGI_STATICSTRING_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
- AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN,
- AML_HAS_ARGS | AML_HAS_RETVAL),
+ AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN,
+ AML_HAS_ARGS | AML_HAS_RETVAL),
/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID,
- AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS),
+ AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS),
/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
- AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS),
+ AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS),
/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
- AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS),
+ AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS),
/* ACPI 2.0 opcodes */
/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP,
- ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
- AML_TYPE_LITERAL, AML_CONSTANT),
+ ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT),
/* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP,
ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE,
AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT,
AML_HAS_ARGS | AML_DEFER),
/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP,
- ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+ ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP,
- ARGI_CREATE_QWORD_FIELD_OP,
- ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
- AML_TYPE_CREATE_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
- AML_DEFER | AML_CREATE),
+ ARGI_CREATE_QWORD_FIELD_OP,
+ ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+ AML_TYPE_CREATE_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+ AML_DEFER | AML_CREATE),
/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP,
- ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY,
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_1T_1R,
- AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R,
+ AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_2A_1T_1R,
- AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_2A_1T_1R,
+ AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R,
- AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT),
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R,
+ AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT),
/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP,
- ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+ ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP,
- ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
- AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
+ ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+ AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
- ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
- AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE | AML_NAMED),
+ ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE | AML_NAMED),
/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
- ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_NO_OBJ,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
- AML_NSNODE),
+ ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+ AML_TYPE_NAMED_NO_OBJ,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
/* ACPI 3.0 opcodes */
/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
- AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
- AML_FLAGS_EXEC_0A_0T_1R)
+ AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
+ AML_FLAGS_EXEC_0A_0T_1R)
/*! [End] no source code translation !*/
};
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 8c6d3fdec38..0dd2ce8a347 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -567,7 +567,8 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
(*sub_object_list)->string.
length + 1);
} else {
- temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
+ temp_size_needed +=
+ acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
}
} else {
/*
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index cc48ab05676..50da494c3ee 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -267,16 +267,19 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
* If BIOS erroneously reversed the _PRT source_name and source_index,
* then reverse them back.
*/
- if (ACPI_GET_OBJECT_TYPE (sub_object_list[3]) != ACPI_TYPE_INTEGER) {
+ if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) !=
+ ACPI_TYPE_INTEGER) {
if (acpi_gbl_enable_interpreter_slack) {
source_name_index = 3;
source_index_index = 2;
- printk(KERN_WARNING "ACPI: Handling Garbled _PRT entry\n");
+ printk(KERN_WARNING
+ "ACPI: Handling Garbled _PRT entry\n");
} else {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].source_index) Need Integer, found %s",
- index,
- acpi_ut_get_object_type_name(sub_object_list[3])));
+ "(PRT[%X].source_index) Need Integer, found %s",
+ index,
+ acpi_ut_get_object_type_name
+ (sub_object_list[3])));
return_ACPI_STATUS(AE_BAD_DATA);
}
}
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index de20a5d6dec..46da116a403 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -46,7 +46,6 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdump")
-
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/* Local prototypes */
static void acpi_rs_out_string(char *title, char *value);
@@ -489,10 +488,9 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
/*
* Optional resource_source for Address resources
*/
- acpi_rs_dump_resource_source(ACPI_CAST_PTR
- (struct
- acpi_resource_source,
- target));
+ acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
+ acpi_resource_source,
+ target));
break;
default:
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index 7e3c335ab32..2c2adb6292c 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -142,7 +142,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
};
#endif
-#endif /* ACPI_FUTURE_USAGE */
+#endif /* ACPI_FUTURE_USAGE */
/*
* Base sizes for external AML resource descriptors, indexed by internal type.
* Includes size of the descriptor header (1 byte for small descriptors,
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index a92755c8877..ca21e4660c7 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -153,10 +153,9 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Perform the conversion */
- status = acpi_rs_convert_resource_to_aml(resource,
- ACPI_CAST_PTR(union
- aml_resource,
- aml),
+ status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
+ aml_resource,
+ aml),
acpi_gbl_set_resource_dispatch
[resource->type]);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index 3b63b561b94..c7081afa893 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -46,7 +46,6 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsmisc")
-
#define INIT_RESOURCE_TYPE(i) i->resource_offset
#define INIT_RESOURCE_LENGTH(i) i->aml_offset
#define INIT_TABLE_LENGTH(i) i->value
@@ -429,8 +428,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
* Optional resource_source (Index and String)
*/
aml_length =
- acpi_rs_set_resource_source(aml,
- (acpi_rs_length)
+ acpi_rs_set_resource_source(aml, (acpi_rs_length)
aml_length, source);
acpi_rs_set_resource_length(aml_length, aml);
break;
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index 2442a8f8df5..11c0bd7b9cf 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -353,10 +353,8 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
*
* Zero the entire area of the buffer.
*/
- total_length =
- (u32)
- ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
- 1;
+ total_length = (u32)
+ ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 991f8901498..f63813a358c 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -217,7 +217,6 @@ acpi_get_current_resources(acpi_handle device_handle,
}
ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
-
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -261,7 +260,6 @@ acpi_get_possible_resources(acpi_handle device_handle,
ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
#endif /* ACPI_FUTURE_USAGE */
-
/*******************************************************************************
*
* FUNCTION: acpi_set_current_resources
@@ -496,7 +494,6 @@ ACPI_EXPORT_SYMBOL(acpi_rs_match_vendor_resource)
* each resource in the list.
*
******************************************************************************/
-
acpi_status
acpi_walk_resources(acpi_handle device_handle,
char *name,
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index f8c63410bcb..bc7e16ec839 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -29,7 +29,6 @@ static u32 acpi_suspend_states[] = {
[PM_SUSPEND_ON] = ACPI_STATE_S0,
[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
[PM_SUSPEND_MEM] = ACPI_STATE_S3,
- [PM_SUSPEND_DISK] = ACPI_STATE_S4,
[PM_SUSPEND_MAX] = ACPI_STATE_S5
};
@@ -94,14 +93,6 @@ static int acpi_pm_enter(suspend_state_t pm_state)
do_suspend_lowlevel();
break;
- case PM_SUSPEND_DISK:
- if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
- status = acpi_enter_sleep_state(acpi_state);
- break;
- case PM_SUSPEND_MAX:
- acpi_power_off();
- break;
-
default:
return -EINVAL;
}
@@ -157,12 +148,13 @@ int acpi_suspend(u32 acpi_state)
suspend_state_t states[] = {
[1] = PM_SUSPEND_STANDBY,
[3] = PM_SUSPEND_MEM,
- [4] = PM_SUSPEND_DISK,
[5] = PM_SUSPEND_MAX
};
if (acpi_state < 6 && states[acpi_state])
return pm_suspend(states[acpi_state]);
+ if (acpi_state == 4)
+ return hibernate();
return -EINVAL;
}
@@ -189,6 +181,49 @@ static struct pm_ops acpi_pm_ops = {
.finish = acpi_pm_finish,
};
+#ifdef CONFIG_SOFTWARE_SUSPEND
+static int acpi_hibernation_prepare(void)
+{
+ return acpi_sleep_prepare(ACPI_STATE_S4);
+}
+
+static int acpi_hibernation_enter(void)
+{
+ acpi_status status = AE_OK;
+ unsigned long flags = 0;
+
+ ACPI_FLUSH_CPU_CACHE();
+
+ local_irq_save(flags);
+ acpi_enable_wakeup_device(ACPI_STATE_S4);
+ /* This shouldn't return. If it returns, we have a problem */
+ status = acpi_enter_sleep_state(ACPI_STATE_S4);
+ local_irq_restore(flags);
+
+ return ACPI_SUCCESS(status) ? 0 : -EFAULT;
+}
+
+static void acpi_hibernation_finish(void)
+{
+ acpi_leave_sleep_state(ACPI_STATE_S4);
+ acpi_disable_wakeup_device(ACPI_STATE_S4);
+
+ /* reset firmware waking vector */
+ acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+
+ if (init_8259A_after_S1) {
+ printk("Broken toshiba laptop -> kicking interrupts\n");
+ init_8259A(0);
+ }
+}
+
+static struct hibernation_ops acpi_hibernation_ops = {
+ .prepare = acpi_hibernation_prepare,
+ .enter = acpi_hibernation_enter,
+ .finish = acpi_hibernation_finish,
+};
+#endif /* CONFIG_SOFTWARE_SUSPEND */
+
/*
* Toshiba fails to preserve interrupts over S1, reinitialization
* of 8259 is needed after S1 resume.
@@ -227,14 +262,17 @@ int __init acpi_sleep_init(void)
sleep_states[i] = 1;
printk(" S%d", i);
}
- if (i == ACPI_STATE_S4) {
- if (sleep_states[i])
- acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
- }
}
printk(")\n");
pm_set_ops(&acpi_pm_ops);
+
+#ifdef CONFIG_SOFTWARE_SUSPEND
+ if (sleep_states[ACPI_STATE_S4])
+ hibernation_set_ops(&acpi_hibernation_ops);
+#else
+ sleep_states[ACPI_STATE_S4] = 0;
+#endif
+
return 0;
}
-
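With PM_SUSPEND_DISK gone from the pm_ops path, suspend-to-disk is now driven through the hibernation_ops interface registered above: hibernate() calls .prepare/.enter/.finish in turn. A sketch of the registration pattern, assuming struct hibernation_ops and hibernation_set_ops() are declared in <linux/suspend.h> as in this era; all my_* names and the s4_supported flag are hypothetical:

#ifdef CONFIG_SOFTWARE_SUSPEND
#include <linux/init.h>
#include <linux/suspend.h>

static int s4_supported;                /* assumption: probed at init time */

static int my_prepare(void) { return 0; }   /* e.g. tell firmware about S4 */
static int my_enter(void)   { return 0; }   /* actually enter the state    */
static void my_finish(void) { }             /* undo prepare after resume   */

static struct hibernation_ops my_hibernation_ops = {
        .prepare = my_prepare,
        .enter   = my_enter,
        .finish  = my_finish,
};

static int __init my_sleep_init(void)
{
        if (s4_supported)               /* as ACPI checks sleep_states[S4] */
                hibernation_set_ops(&my_hibernation_ops);
        return 0;
}
#endif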
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 5a76e5be61d..61f1822cc35 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -60,7 +60,7 @@ acpi_system_write_sleep(struct file *file,
state = simple_strtoul(str, NULL, 0);
#ifdef CONFIG_SOFTWARE_SUSPEND
if (state == 4) {
- error = pm_suspend(PM_SUSPEND_DISK);
+ error = hibernate();
goto Done;
}
#endif
@@ -349,8 +349,7 @@ acpi_system_write_alarm(struct file *file,
end:
return_VALUE(result ? result : count);
}
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
+#endif /* HAVE_ACPI_LEGACY_ALARM */
extern struct list_head acpi_wakeup_device_list;
extern spinlock_t acpi_device_lock;
@@ -380,8 +379,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->wakeup.state.enabled ? "enabled" : "disabled");
if (ldev)
seq_printf(seq, "%s:%s",
- ldev->bus ? ldev->bus->name : "no-bus",
- ldev->bus_id);
+ ldev->bus ? ldev->bus->name : "no-bus",
+ ldev->bus_id);
seq_printf(seq, "\n");
put_device(ldev);
@@ -490,7 +489,7 @@ static u32 rtc_handler(void *context)
return ACPI_INTERRUPT_HANDLED;
}
-#endif /* HAVE_ACPI_LEGACY_ALARM */
+#endif /* HAVE_ACPI_LEGACY_ALARM */
static int __init acpi_sleep_proc_init(void)
{
@@ -517,7 +516,7 @@ static int __init acpi_sleep_proc_init(void)
entry->proc_fops = &acpi_system_alarm_fops;
acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
-#endif /* HAVE_ACPI_LEGACY_ALARM */
+#endif /* HAVE_ACPI_LEGACY_ALARM */
/* 'wakeup device' [R/W] */
entry =
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index 1db833eb241..1285e91474f 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -334,7 +334,8 @@ static void acpi_tb_convert_fadt(void)
(acpi_gbl_FADT.xpm1a_event_block.address +
pm1_register_length));
/* Don't forget to copy space_id of the GAS */
- acpi_gbl_xpm1a_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
+ acpi_gbl_xpm1a_enable.space_id =
+ acpi_gbl_FADT.xpm1a_event_block.space_id;
/* The PM1B register block is optional, ignore if not present */
@@ -344,7 +345,8 @@ static void acpi_tb_convert_fadt(void)
(acpi_gbl_FADT.xpm1b_event_block.
address + pm1_register_length));
/* Don't forget to copy space_id of the GAS */
- acpi_gbl_xpm1b_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
+ acpi_gbl_xpm1b_enable.space_id =
+ acpi_gbl_FADT.xpm1a_event_block.space_id;
}
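The space_id fix above matters because the enable registers are synthesized rather than read from the FADT: each PM1 event block is a status half followed by an enable half, so the code builds an enable GAS at the event-block address plus the status-half length, and must now carry the address-space ID across as well. A generic sketch of that derivation, assuming the halving convention just described; the gas struct and names are hypothetical stand-ins for acpi_generic_address:

#include <linux/types.h>

struct gas {                            /* hypothetical mini-GAS */
        u8  space_id;                   /* system memory vs. system I/O */
        u8  bit_width;
        u64 address;
};

static void derive_enable_gas(struct gas *enable, const struct gas *event)
{
        u32 half = event->bit_width / 8 / 2;    /* bytes in the status half */

        enable->address   = event->address + half;
        enable->bit_width = event->bit_width / 2;
        enable->space_id  = event->space_id;    /* the line the fix adds */
}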
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 417ef5fa766..5b302c4e293 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -201,6 +201,7 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_OK);
}
+
/*******************************************************************************
*
* FUNCTION: acpi_load_table
@@ -262,7 +263,7 @@ ACPI_EXPORT_SYMBOL(acpi_load_table)
acpi_status
acpi_get_table_header(char *signature,
acpi_native_uint instance,
- struct acpi_table_header *out_table_header)
+ struct acpi_table_header * out_table_header)
{
acpi_native_uint i;
acpi_native_uint j;
@@ -321,7 +322,6 @@ acpi_get_table_header(char *signature,
ACPI_EXPORT_SYMBOL(acpi_get_table_header)
-
/******************************************************************************
*
* FUNCTION: acpi_unload_table_id
@@ -346,11 +346,11 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
continue;
}
/*
- * Delete all namespace objects owned by this table. Note that these
- * objects can appear anywhere in the namespace by virtue of the AML
- * "Scope" operator. Thus, we need to track ownership by an ID, not
- * simply a position within the hierarchy
- */
+ * Delete all namespace objects owned by this table. Note that these
+ * objects can appear anywhere in the namespace by virtue of the AML
+ * "Scope" operator. Thus, we need to track ownership by an ID, not
+ * simply a position within the hierarchy
+ */
acpi_tb_delete_namespace_by_owner(i);
status = acpi_tb_release_owner_id(i);
acpi_tb_set_table_loaded_flag(i, FALSE);
@@ -376,7 +376,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
*****************************************************************************/
acpi_status
acpi_get_table(char *signature,
- acpi_native_uint instance, struct acpi_table_header ** out_table)
+ acpi_native_uint instance, struct acpi_table_header **out_table)
{
acpi_native_uint i;
acpi_native_uint j;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 589b98b7b21..1ada017d01e 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -59,8 +59,6 @@
#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0
#define ACPI_THERMAL_NOTIFY_HOT 0xF1
#define ACPI_THERMAL_MODE_ACTIVE 0x00
-#define ACPI_THERMAL_MODE_PASSIVE 0x01
-#define ACPI_THERMAL_MODE_CRITICAL 0xff
#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff"
#define ACPI_THERMAL_MAX_ACTIVE 10
@@ -86,9 +84,6 @@ static int acpi_thermal_resume(struct acpi_device *device);
static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
-static ssize_t acpi_thermal_write_trip_points(struct file *,
- const char __user *, size_t,
- loff_t *);
static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
static ssize_t acpi_thermal_write_cooling_mode(struct file *,
const char __user *, size_t,
@@ -167,7 +162,6 @@ struct acpi_thermal {
unsigned long temperature;
unsigned long last_temperature;
unsigned long polling_frequency;
- u8 cooling_mode;
volatile u8 zombie;
struct acpi_thermal_flags flags;
struct acpi_thermal_state state;
@@ -193,7 +187,6 @@ static const struct file_operations acpi_thermal_temp_fops = {
static const struct file_operations acpi_thermal_trip_fops = {
.open = acpi_thermal_trip_open_fs,
.read = seq_read,
- .write = acpi_thermal_write_trip_points,
.llseek = seq_lseek,
.release = single_release,
};
@@ -297,11 +290,6 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
if (ACPI_FAILURE(status))
return -ENODEV;
- tz->cooling_mode = mode;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cooling mode [%s]\n",
- mode ? "passive" : "active"));
-
return 0;
}
@@ -889,67 +877,6 @@ static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file)
return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data);
}
-static ssize_t
-acpi_thermal_write_trip_points(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct seq_file *m = file->private_data;
- struct acpi_thermal *tz = m->private;
-
- char *limit_string;
- int num, critical, hot, passive;
- int *active;
- int i = 0;
-
-
- limit_string = kzalloc(ACPI_THERMAL_MAX_LIMIT_STR_LEN, GFP_KERNEL);
- if (!limit_string)
- return -ENOMEM;
-
- active = kmalloc(ACPI_THERMAL_MAX_ACTIVE * sizeof(int), GFP_KERNEL);
- if (!active) {
- kfree(limit_string);
- return -ENOMEM;
- }
-
- if (!tz || (count > ACPI_THERMAL_MAX_LIMIT_STR_LEN - 1)) {
- count = -EINVAL;
- goto end;
- }
-
- if (copy_from_user(limit_string, buffer, count)) {
- count = -EFAULT;
- goto end;
- }
-
- limit_string[count] = '\0';
-
- num = sscanf(limit_string, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d",
- &critical, &hot, &passive,
- &active[0], &active[1], &active[2], &active[3], &active[4],
- &active[5], &active[6], &active[7], &active[8],
- &active[9]);
- if (!(num >= 5 && num < (ACPI_THERMAL_MAX_ACTIVE + 3))) {
- count = -EINVAL;
- goto end;
- }
-
- tz->trips.critical.temperature = CELSIUS_TO_KELVIN(critical);
- tz->trips.hot.temperature = CELSIUS_TO_KELVIN(hot);
- tz->trips.passive.temperature = CELSIUS_TO_KELVIN(passive);
- for (i = 0; i < num - 3; i++) {
- if (!(tz->trips.active[i].flags.valid))
- break;
- tz->trips.active[i].temperature = CELSIUS_TO_KELVIN(active[i]);
- }
-
- end:
- kfree(active);
- kfree(limit_string);
- return count;
-}
-
static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_thermal *tz = seq->private;
@@ -958,15 +885,10 @@ static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
if (!tz)
goto end;
- if (!tz->flags.cooling_mode) {
+ if (!tz->flags.cooling_mode)
seq_puts(seq, "<setting not supported>\n");
- }
-
- if (tz->cooling_mode == ACPI_THERMAL_MODE_CRITICAL)
- seq_printf(seq, "cooling mode: critical\n");
else
- seq_printf(seq, "cooling mode: %s\n",
- tz->cooling_mode ? "passive" : "active");
+ seq_puts(seq, "0 - Active; 1 - Passive\n");
end:
return 0;
@@ -1223,28 +1145,6 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE);
if (!result)
tz->flags.cooling_mode = 1;
- else {
- /* Oh,we have not _SCP method.
- Generally show cooling_mode by _ACx, _PSV,spec 12.2 */
- tz->flags.cooling_mode = 0;
- if (tz->trips.active[0].flags.valid
- && tz->trips.passive.flags.valid) {
- if (tz->trips.passive.temperature >
- tz->trips.active[0].temperature)
- tz->cooling_mode = ACPI_THERMAL_MODE_ACTIVE;
- else
- tz->cooling_mode = ACPI_THERMAL_MODE_PASSIVE;
- } else if (!tz->trips.active[0].flags.valid
- && tz->trips.passive.flags.valid) {
- tz->cooling_mode = ACPI_THERMAL_MODE_PASSIVE;
- } else if (tz->trips.active[0].flags.valid
- && !tz->trips.passive.flags.valid) {
- tz->cooling_mode = ACPI_THERMAL_MODE_ACTIVE;
- } else {
- /* _ACx and _PSV are optional, but _CRT is required */
- tz->cooling_mode = ACPI_THERMAL_MODE_CRITICAL;
- }
- }
/* Get default polling frequency [_TZP] (optional) */
if (tzp)
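With acpi_thermal_write_trip_points() removed, the trip-points file becomes a plain read-only seq_file entry. The standard single_open() pattern for such a /proc file in this era looks like the sketch below; the my_* names are hypothetical:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *m, void *unused)
{
        seq_puts(m, "example\n");       /* report state, never mutate it */
        return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
        return single_open(file, my_show, PDE(inode)->data);
}

static const struct file_operations my_ro_fops = {
        .open    = my_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,      /* note: no .write method */
};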
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 55a76480749..6e56d5f7c43 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -107,7 +107,6 @@ acpi_status acpi_ut_create_caches(void)
if (ACPI_FAILURE(status)) {
return (status);
}
-
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
/* Memory allocation lists */
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 870f6edeb5f..285a0f53176 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -45,7 +45,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utcache")
-
#ifdef ACPI_USE_LOCAL_CACHE
/*******************************************************************************
*
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("utcache")
acpi_status
acpi_os_create_cache(char *cache_name,
u16 object_size,
- u16 max_depth, struct acpi_memory_list **return_cache)
+ u16 max_depth, struct acpi_memory_list ** return_cache)
{
struct acpi_memory_list *cache;
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 84d529db0a6..4c1e00874df 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -814,7 +814,9 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
/*
* Create the object array
*/
- target_object->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.count + 1) * sizeof(void *));
+ target_object->package.elements =
+ ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.
+ count + 1) * sizeof(void *));
if (!target_object->package.elements) {
status = AE_NO_MEMORY;
goto error_exit;
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 61ad4f2daee..c7e128e5369 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -45,7 +45,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdebug")
-
#ifdef ACPI_DEBUG_OUTPUT
static acpi_thread_id acpi_gbl_prev_thread_id;
static char *acpi_gbl_fn_entry_str = "----Entry";
@@ -181,7 +180,8 @@ acpi_ut_debug_print(u32 requested_debug_level,
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
- (unsigned long)acpi_gbl_prev_thread_id, (unsigned long)thread_id);
+ (unsigned long)acpi_gbl_prev_thread_id,
+ (unsigned long)thread_id);
}
acpi_gbl_prev_thread_id = thread_id;
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 673a0caa407..f777cebdc46 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -170,6 +170,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
acpi_os_delete_mutex(object->mutex.os_mutex);
acpi_gbl_global_lock_mutex = NULL;
} else {
+ acpi_ex_unlink_mutex(object);
acpi_os_delete_mutex(object->mutex.os_mutex);
}
break;
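The acpi_ex_unlink_mutex() call added above detaches the mutex object from its owning thread's acquired-mutex list before the OS mutex is destroyed; without it the list would keep a dangling pointer into freed memory. The unlink step for such a doubly linked list reduces to the usual pattern, sketched here with hypothetical field names:

struct node {
        struct node *prev, *next;
};

/*
 * Detach n from the list anchored at *head.  After this returns, no
 * live node points at n, so freeing n cannot leave a dangling link.
 */
static void unlink_node(struct node **head, struct node *n)
{
        if (n->prev)
                n->prev->next = n->next;
        else
                *head = n->next;        /* n was the list head */
        if (n->next)
                n->next->prev = n->prev;
        n->prev = n->next = NULL;
}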
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index af33358a964..1621655d6e2 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -55,12 +55,10 @@ ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
* Static global variable initialization.
*
******************************************************************************/
-
/*
* We want the debug switches statically initialized so they
* are already set when the debugger is entered.
*/
-
/* Debug switch - level and trace mask */
u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
@@ -735,5 +733,5 @@ void acpi_ut_init_globals(void)
}
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
-ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
-ACPI_EXPORT_SYMBOL(acpi_gpe_count)
+ ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
+ ACPI_EXPORT_SYMBOL(acpi_gpe_count)
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 50133fffe42..2d19f71e9cf 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -802,9 +802,8 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
valid_digits++;
- if (sign_of0x
- && ((valid_digits > 16)
- || ((valid_digits > 8) && mode32))) {
+ if (sign_of0x && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
/*
* This is to_integer operation case.
* No any restrictions for string-to-integer conversion,
@@ -1049,6 +1048,7 @@ acpi_ut_exception(char *module_name,
acpi_os_vprintf(format, args);
acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
}
+
EXPORT_SYMBOL(acpi_ut_exception);
void ACPI_INTERNAL_VAR_XFACE
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index cbad2ef5987..4820bc86d1f 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -244,7 +244,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %lX attempting to acquire Mutex [%s]\n",
- (unsigned long) this_thread_id,
+ (unsigned long)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
@@ -252,7 +252,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
if (ACPI_SUCCESS(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %lX acquired Mutex [%s]\n",
- (unsigned long) this_thread_id,
+ (unsigned long)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
acpi_gbl_mutex_info[mutex_id].use_count++;
@@ -260,7 +260,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
} else {
ACPI_EXCEPTION((AE_INFO, status,
"Thread %lX could not acquire Mutex [%X]",
- (unsigned long) this_thread_id, mutex_id));
+ (unsigned long)this_thread_id, mutex_id));
}
return (status);
@@ -287,7 +287,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
this_thread_id = acpi_os_get_thread_id();
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %lX releasing Mutex [%s]\n",
- (unsigned long) this_thread_id,
+ (unsigned long)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index e8fe1ba6cc2..cbbd3315a1e 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -46,7 +46,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
-
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
/*
* Strings used to decode resource descriptors.
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index de3276f4f46..e9a57806cd3 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -337,7 +337,6 @@ acpi_status acpi_terminate(void)
}
ACPI_EXPORT_SYMBOL(acpi_terminate)
-
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -470,7 +469,6 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
#endif /* ACPI_FUTURE_USAGE */
-
/*****************************************************************************
*
* FUNCTION: acpi_purge_cached_objects
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 45dbdc14915..f031b873233 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -3,6 +3,7 @@
#
menu "Serial ATA (prod) and Parallel ATA (experimental) drivers"
+ depends on HAS_IOMEM
config ATA
tristate "ATA device support"
@@ -435,7 +436,7 @@ config PATA_OPTIDMA
help
This option enables DMA/PIO support for the later OPTi
controllers found on some old motherboards and in some
- latops
+ laptops.
If unsure, say N.
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 03a0acff6cf..cb3eab6e379 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -489,8 +489,7 @@ static void taskfile_load_raw(struct ata_port *ap,
/* convert gtf to tf */
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */
- tf.protocol = atadev->class == ATA_DEV_ATAPI ?
- ATA_PROT_ATAPI_NODATA : ATA_PROT_NODATA;
+ tf.protocol = ATA_PROT_NODATA;
tf.feature = gtf->tfa[0]; /* 0x1f1 */
tf.nsect = gtf->tfa[1]; /* 0x1f2 */
tf.lbal = gtf->tfa[2]; /* 0x1f3 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a7950885d18..4595d1f8cf6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -895,6 +895,7 @@ static u64 ata_read_native_max_address(struct ata_device *dev)
/**
* ata_set_native_max_address_ext - LBA48 native max set
* @dev: Device to query
+ * @new_sectors: new max sectors value to set for the device
*
* Perform an LBA48 size set max upon the device in question. Return the
* actual LBA48 size or zero if the command fails.
@@ -932,6 +933,7 @@ static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sector
/**
* ata_set_native_max_address - LBA28 native max set
* @dev: Device to query
+ * @new_sectors: new max sectors value to set for the device
*
* Perform an LBA28 size set max upon the device in question. Return the
* actual LBA28 size or zero if the command fails.
@@ -1316,7 +1318,7 @@ void ata_port_flush_task(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
DPRINTK("flush #1\n");
- flush_workqueue(ata_wq);
+ cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
/*
* At this point, if a task is running, it's guaranteed to see
@@ -1327,7 +1329,7 @@ void ata_port_flush_task(struct ata_port *ap)
if (ata_msg_ctl(ap))
ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
__FUNCTION__);
- flush_workqueue(ata_wq);
+ cancel_work_sync(&ap->port_task.work);
}
spin_lock_irqsave(ap->lock, flags);
@@ -6475,9 +6477,9 @@ void ata_port_detach(struct ata_port *ap)
/* Flush hotplug task. The sequence is similar to
* ata_port_flush_task().
*/
- flush_workqueue(ata_aux_wq);
+ cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
cancel_delayed_work(&ap->hotplug_task);
- flush_workqueue(ata_aux_wq);
+ cancel_work_sync(&ap->hotplug_task.work);
skip_eh:
/* remove the associated SCSI host */
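For reference, a minimal sketch of the pattern the two hunks above switch to: cancel_delayed_work() only stops a pending timer, while cancel_work_sync() also waits for a currently executing instance of the work item, which is what the per-item calls buy over flushing the whole workqueue. The struct and function names below are hypothetical.

/* Hedged sketch: stop a delayed work item and wait for any running
 * instance to finish, mirroring the ata_port_detach() sequence above. */
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work hotplug_task;
};

static void my_dev_detach(struct my_dev *dev)
{
	/* keep the timer from queueing a new execution */
	cancel_delayed_work(&dev->hotplug_task);
	/* wait until an already-running instance has returned */
	cancel_work_sync(&dev->hotplug_task.work);
}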
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 75dc84797ff..11245e331f7 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -357,6 +357,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
+ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 27685ce63ce..fb8c9e14b8d 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -375,7 +375,7 @@ static __init int qdi_init(void)
res = inb(port + 3);
if (res & 1) {
/* Single channel mode */
- if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04))
+ if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
ct++;
} else {
/* Dual channel mode */
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 5df354d573e..203f463ac39 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -1142,14 +1142,14 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static int printed_version;
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
- struct device *dev = &pdev->dev;
+ struct ata_host *host;
int rc;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- host = ata_port_alloc_pinfo(&pdev->dev, ppi, 1);
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
if (!host)
return -ENOMEM;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index e2e795e5823..a097595d4dc 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -257,6 +257,8 @@ static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
+static void nv_adma_freeze(struct ata_port *ap);
+static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
@@ -444,8 +446,8 @@ static const struct ata_port_operations nv_adma_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = nv_adma_qc_prep,
.qc_issue = nv_adma_qc_issue,
- .freeze = nv_ck804_freeze,
- .thaw = nv_ck804_thaw,
+ .freeze = nv_adma_freeze,
+ .thaw = nv_adma_thaw,
.error_handler = nv_adma_error_handler,
.post_internal_cmd = nv_adma_post_internal_cmd,
.data_xfer = ata_data_xfer,
@@ -815,8 +817,16 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
u16 status;
u32 gen_ctl;
u32 notifier, notifier_error;
+
+ /* if ADMA is disabled, use standard ata interrupt handler */
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+ u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+ >> (NV_INT_PORT_SHIFT * i);
+ handled += nv_host_intr(ap, irq_stat);
+ continue;
+ }
- /* if in ATA register mode, use standard ata interrupt handler */
+ /* if in ATA register mode, check for standard interrupts */
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
>> (NV_INT_PORT_SHIFT * i);
@@ -826,7 +836,6 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
command is active, to prevent losing interrupts. */
irq_stat |= NV_INT_DEV;
handled += nv_host_intr(ap, irq_stat);
- continue;
}
notifier = readl(mmio + NV_ADMA_NOTIFIER);
@@ -912,22 +921,77 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
+static void nv_adma_freeze(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 tmp;
+
+ nv_ck804_freeze(ap);
+
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ return;
+
+ /* clear any outstanding CK804 notifications */
+ writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+ ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+
+ /* Disable interrupt */
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+ mmio + NV_ADMA_CTL);
+ readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+}
+
+static void nv_adma_thaw(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 tmp;
+
+ nv_ck804_thaw(ap);
+
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ return;
+
+ /* Enable interrupt */
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+ mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+}
+
static void nv_adma_irq_clear(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
- u16 status = readw(mmio + NV_ADMA_STAT);
- u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
- u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
- void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
+ u32 notifier_clears[2];
- /* clear ADMA status */
- writew(status, mmio + NV_ADMA_STAT);
- writel(notifier | notifier_error,
- pp->notifier_clear_block);
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+ ata_bmdma_irq_clear(ap);
+ return;
+ }
+
+ /* clear any outstanding CK804 notifications */
+ writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+ ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
- /** clear legacy status */
- iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
+ /* clear ADMA status */
+ writew(0xffff, mmio + NV_ADMA_STAT);
+
+ /* clear notifiers - note both ports need to be written with
+ something even though we are only clearing on one */
+ if (ap->port_no == 0) {
+ notifier_clears[0] = 0xFFFFFFFF;
+ notifier_clears[1] = 0;
+ } else {
+ notifier_clears[0] = 0;
+ notifier_clears[1] = 0xFFFFFFFF;
+ }
+ pp = ap->host->ports[0]->private_data;
+ writel(notifier_clears[0], pp->notifier_clear_block);
+ pp = ap->host->ports[1]->private_data;
+ writel(notifier_clears[1], pp->notifier_clear_block);
}
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
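The readw() after each writew() in nv_adma_freeze()/nv_adma_thaw() above is a posted-write flush: reading the register back forces the write out of the PCI posting buffers before the function returns. A minimal sketch of that pattern, with a hypothetical helper name:

/* Hedged sketch of the mask/unmask-with-flush pattern used above;
 * adma_irq_set() is hypothetical, the MMIO accessors are standard. */
#include <linux/io.h>
#include <linux/types.h>

static void adma_irq_set(void __iomem *ctl, u16 bits, bool enable)
{
	u16 tmp = readw(ctl);

	if (enable)
		tmp |= bits;
	else
		tmp &= ~bits;
	writew(tmp, ctl);
	readw(ctl); /* read back to flush the posted write */
}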
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index f56549b90aa..3a7d9b5332a 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -45,7 +45,7 @@
#include "sata_promise.h"
#define DRV_NAME "sata_promise"
-#define DRV_VERSION "2.05"
+#define DRV_VERSION "2.07"
enum {
@@ -653,6 +653,8 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
qc->err_mask |= ac_err_mask;
pdc_reset_port(ap);
+
+ ata_port_abort(ap);
}
static inline unsigned int pdc_host_intr( struct ata_port *ap,
@@ -924,6 +926,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
struct ata_host *host;
void __iomem *base;
int n_ports, i, rc;
+ int is_sataii_tx4;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -962,10 +965,23 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
}
host->iomap = pcim_iomap_table(pdev);
- for (i = 0; i < host->n_ports; i++)
+ is_sataii_tx4 = 0;
+ if ((pi->flags & (PDC_FLAG_GEN_II|PDC_FLAG_4_PORTS)) == (PDC_FLAG_GEN_II|PDC_FLAG_4_PORTS)) {
+ is_sataii_tx4 = 1;
+ dev_printk(KERN_INFO, &pdev->dev, "applying SATAII TX4 port numbering workaround\n");
+ }
+ for (i = 0; i < host->n_ports; i++) {
+ static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
+ int ata_nr;
+
+ ata_nr = i;
+ if (is_sataii_tx4)
+ ata_nr = sataii_tx4_port_remap[i];
+
pdc_ata_setup_port(host->ports[i],
- base + 0x200 + i * 0x80,
- base + 0x400 + i * 0x100);
+ base + 0x200 + ata_nr * 0x80,
+ base + 0x400 + ata_nr * 0x100);
+ }
/* initialize adapter */
pdc_host_init(host);
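The hunk above decouples the host's port index from the hardware engine it programs: on SATAII TX4 boards the four engines are wired in the order 3, 1, 0, 2. A standalone sketch (user space, illustrative only) of the resulting register-base arithmetic:

/* Standalone sketch of the SATAII TX4 remap arithmetic above; the base
 * address is a placeholder, the offsets mirror the patch. */
#include <stdio.h>

static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2 };

int main(void)
{
	unsigned long base = 0; /* hypothetical iomap base */
	int i;

	for (i = 0; i < 4; i++) {
		int ata_nr = sataii_tx4_port_remap[i];
		printf("port %d -> engine %d: cmd 0x%03lx, ctl 0x%03lx\n",
		       i, ata_nr,
		       base + 0x200 + ata_nr * 0x80,
		       base + 0x400 + ata_nr * 0x100);
	}
	return 0;
}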
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 305ab7c68ca..939c9246fdd 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -93,6 +93,10 @@ static struct pci_driver svia_pci_driver = {
.name = DRV_NAME,
.id_table = svia_pci_tbl,
.probe = svia_init_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
.remove = ata_pci_remove_one,
};
@@ -112,6 +116,10 @@ static struct scsi_host_template svia_sht = {
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+#ifdef CONFIG_PM
+ .suspend = ata_scsi_device_suspend,
+ .resume = ata_scsi_device_resume,
+#endif
};
static const struct ata_port_operations vt6420_sata_ops = {
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 0300e7f54cc..2e18a63ead3 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -6,6 +6,7 @@
#
menu "Auxiliary Display support"
+ depends on PARPORT
config KS0108
tristate "KS0108 LCD Controller"
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index e177c9533b6..e1c0730a3b9 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -101,19 +101,6 @@ static void add_dr(struct device *dev, struct devres_node *node)
list_add_tail(&node->entry, &dev->devres_head);
}
-/**
- * devres_alloc - Allocate device resource data
- * @release: Release function devres will be associated with
- * @size: Allocation size
- * @gfp: Allocation flags
- *
- * allocate devres of @size bytes. The allocated area is zeroed, then
- * associated with @release. The returned pointer can be passed to
- * other devres_*() functions.
- *
- * RETURNS:
- * Pointer to allocated devres on success, NULL on failure.
- */
#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
const char *name)
@@ -128,6 +115,19 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
}
EXPORT_SYMBOL_GPL(__devres_alloc);
#else
+/**
+ * devres_alloc - Allocate device resource data
+ * @release: Release function devres will be associated with
+ * @size: Allocation size
+ * @gfp: Allocation flags
+ *
+ * Allocate devres of @size bytes. The allocated area is zeroed, then
+ * associated with @release. The returned pointer can be passed to
+ * other devres_*() functions.
+ *
+ * RETURNS:
+ * Pointer to allocated devres on success, NULL on failure.
+ */
void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
struct devres *dr;
@@ -416,7 +416,7 @@ static int release_nodes(struct device *dev, struct list_head *first,
}
/**
- * devres_release_all - Release all resources
+ * devres_release_all - Release all managed resources
* @dev: Device to release resources for
*
* Release all resources associated with @dev. This function is
@@ -600,7 +600,7 @@ static int devm_kzalloc_match(struct device *dev, void *res, void *data)
}
/**
- * devm_kzalloc - Managed kzalloc
+ * devm_kzalloc - Resource-managed kzalloc
* @dev: Device to allocate memory for
* @size: Allocation size
* @gfp: Allocation gfp flags
@@ -628,7 +628,7 @@ void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
EXPORT_SYMBOL_GPL(devm_kzalloc);
/**
- * devm_kfree - Managed kfree
+ * devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
*
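Since the kernel-doc above spells out the devres_alloc() contract, a short usage sketch may help; my_state, my_release and my_setup are hypothetical:

/* Hedged sketch of typical devres usage per the kernel-doc above. */
#include <linux/device.h>
#include <linux/gfp.h>

struct my_state {
	int id;
};

static void my_release(struct device *dev, void *res)
{
	/* struct my_state *st = res; tear down whatever it wrapped */
}

static int my_setup(struct device *dev)
{
	struct my_state *st;

	st = devres_alloc(my_release, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;
	st->id = 1;
	devres_add(dev, st); /* released automatically on driver detach */
	return 0;
}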
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index eb84d9d4464..869ff8c0014 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -360,7 +360,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
* This function creates a simple platform device that requires minimal
* resource and memory management. Canned release function freeing
* memory allocated for the device allows drivers using such devices
- * to be unloaded iwithout waiting for the last reference to the device
+ * to be unloaded without waiting for the last reference to the device
* to be dropped.
*
* This interface is primarily intended for use with legacy drivers
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 067a9e8bc37..8d8cdfec652 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -126,10 +126,13 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
rc = topology_add_dev(cpu);
break;
case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
topology_remove_dev(cpu);
break;
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 17ee97f3a99..b4c8319138b 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -444,8 +444,6 @@ config CDROM_PKTCDVD_WCACHE
this option is dangerous unless the CD-RW media is known good, as we
don't do deferred write error handling yet.
-source "drivers/s390/block/Kconfig"
-
config ATA_OVER_ETH
tristate "ATA over Ethernet support"
depends on NET
@@ -453,6 +451,8 @@ config ATA_OVER_ETH
This driver provides support for ATA over Ethernet block
devices like the Coraid EtherDrive (R) Storage Blade.
+source "drivers/s390/block/Kconfig"
+
endmenu
endif
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index af6d7274a7c..18cdd8c7762 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
transfer_result = lo_do_transfer(lo, WRITE, page, offset,
bvec->bv_page, bv_offs, size, IV);
if (unlikely(transfer_result)) {
- char *kaddr;
-
/*
* The transfer failed, but we still write the data to
* keep prepare/commit calls balanced.
*/
printk(KERN_ERR "loop: transfer error block %llu\n",
(unsigned long long)index);
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, size);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, size, KM_USER0);
}
flush_dcache_page(page);
ret = aops->commit_write(file, page, offset,
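zero_user_page() used above bundles the removed kmap_atomic/memset/kunmap_atomic sequence into one helper. A hedged sketch of the open-coded equivalent (the real helper may also take care of cache flushing):

/* Open-coded equivalent of zero_user_page(page, offset, size, KM_USER0);
 * illustrative only. */
#include <linux/highmem.h>
#include <linux/string.h>

static void zero_partial_page(struct page *page, unsigned int offset,
			      unsigned int size)
{
	char *kaddr = kmap_atomic(page, KM_USER0); /* short-lived mapping */

	memset(kaddr + offset, 0, size); /* clear only the failed range */
	kunmap_atomic(kaddr, KM_USER0);
}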
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 090796bef78..069ae39a9cd 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -366,20 +366,25 @@ static struct disk_attribute pid_attr = {
.show = pid_show,
};
-static void nbd_do_it(struct nbd_device *lo)
+static int nbd_do_it(struct nbd_device *lo)
{
struct request *req;
+ int ret;
BUG_ON(lo->magic != LO_MAGIC);
lo->pid = current->pid;
- sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
+ ret = sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
+ if (ret) {
+ printk(KERN_ERR "nbd: sysfs_create_file failed!\n");
+ return ret;
+ }
while ((req = nbd_read_stat(lo)) != NULL)
nbd_end_request(req);
sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
- return;
+ return 0;
}
static void nbd_clear_que(struct nbd_device *lo)
@@ -569,7 +574,9 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
case NBD_DO_IT:
if (!lo->file)
return -EINVAL;
- nbd_do_it(lo);
+ error = nbd_do_it(lo);
+ if (error)
+ return error;
/* on return tidy up in case we have a signal */
/* Forcibly shutdown the socket causing all listeners
* to error
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 43d4ebcb3b4..a1512da3241 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -151,7 +151,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page,
}
/*
- * ->writepage to the the blockdev's mapping has to redirty the page so that the
+ * ->writepage to the blockdev's mapping has to redirty the page so that the
* VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM
* won't try to (pointlessly) write the page again for a while.
*
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1e32fb834eb..abcafac6473 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,7 @@ menu "Character devices"
config VT
bool "Virtual terminal" if EMBEDDED
+ depends on !S390
select INPUT
default y if !VIOCONS
---help---
@@ -81,6 +82,7 @@ config VT_HW_CONSOLE_BINDING
config SERIAL_NONSTANDARD
bool "Non-standard serial port support"
+ depends on HAS_IOMEM
---help---
Say Y here if you have any non-standard serial boards -- boards
which aren't supported using the standard "dumb" serial driver.
@@ -631,7 +633,8 @@ config HVC_CONSOLE
config HVC_ISERIES
bool "iSeries Hypervisor Virtual Console support"
- depends on PPC_ISERIES && !VIOCONS
+ depends on PPC_ISERIES
+ default y
select HVC_DRIVER
help
iSeries machines support a hypervisor virtual console.
@@ -764,7 +767,7 @@ config NVRAM
config RTC
tristate "Enhanced Real Time Clock Support"
- depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH
+ depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH && !S390
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -812,7 +815,7 @@ config SGI_IP27_RTC
config GEN_RTC
tristate "Generic /dev/rtc emulation"
- depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV
+ depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV && !S390
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -857,6 +860,7 @@ config COBALT_LCD
config DTLK
tristate "Double Talk PC internal speech card support"
+ depends on ISA
help
This driver is for the DoubleTalk PC, a speech synthesizer
manufactured by RC Systems (<http://www.rcsys.com/>). It is also
@@ -1042,7 +1046,7 @@ config HPET_MMAP
config HANGCHECK_TIMER
tristate "Hangcheck timer"
- depends on X86 || IA64 || PPC64
+ depends on X86 || IA64 || PPC64 || S390
help
The hangcheck-timer module detects when the system has gone
out to lunch past a certain margin. It can reboot the system
@@ -1077,5 +1081,7 @@ config DEVPORT
depends on ISA || PCI
default y
+source "drivers/s390/char/Kconfig"
+
endmenu
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 892db709698..32ed19c9ec1 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -65,7 +65,7 @@ int drm_dma_setup(drm_device_t * dev)
* \param dev DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the the drm_device::dma structure itself.
+ * finally the drm_device::dma structure itself.
*/
void drm_dma_takedown(drm_device_t * dev)
{
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 35540cfb43d..b5c5b9fa84c 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -157,7 +157,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
* \param address access address.
* \return pointer to the page structure.
*
- * Get the the mapping, find the real physical page to map, get the page, and
+ * Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index a881f96c983..ecda760ae8c 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -293,7 +293,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
# define R300_PVS_CNTL_1_POS_END_SHIFT 10
# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
-/* Addresses are relative the the vertex program parameters area. */
+/* Addresses are relative to the vertex program parameters area. */
#define R300_VAP_PVS_CNTL_2 0x22D4
# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 49f914e7921..9e1fc02967f 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -12,7 +12,7 @@
*
* This driver allows use of the real time clock (built into
* nearly all computers) from user space. It exports the /dev/rtc
- * interface supporting various ioctl() and also the /proc/dev/rtc
+ * interface supporting various ioctl() and also the /proc/driver/rtc
* pseudo-file for status information.
*
* The ioctls can be used to set the interrupt behaviour where
@@ -377,7 +377,7 @@ static int gen_rtc_release(struct inode *inode, struct file *file)
#ifdef CONFIG_PROC_FS
/*
- * Info exported via "/proc/rtc".
+ * Info exported via "/proc/driver/rtc".
*/
static int gen_rtc_proc_output(char *buf)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5f3acd8e64b..7cda04b3353 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -91,3 +91,17 @@ config HW_RANDOM_OMAP
module will be called omap-rng.
If unsure, say Y.
+
+config HW_RANDOM_PASEMI
+ tristate "PA Semi HW Random Number Generator support"
+ depends on HW_RANDOM && PPC_PASEMI
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the PA6T-1682M processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pasemi-rng.
+
+ If unsure, say Y.
+
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index c41fa19454e..c8b7300e2fb 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
+obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
new file mode 100644
index 00000000000..fa6040b6c8f
--- /dev/null
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient on-chip RNG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <asm/of_platform.h>
+#include <asm/io.h>
+
+#define SDCRNG_CTL_REG 0x00
+#define SDCRNG_CTL_FVLD_M 0x0000f000
+#define SDCRNG_CTL_FVLD_S 12
+#define SDCRNG_CTL_KSZ 0x00000800
+#define SDCRNG_CTL_RSRC_CRG 0x00000010
+#define SDCRNG_CTL_RSRC_RRG 0x00000000
+#define SDCRNG_CTL_CE 0x00000004
+#define SDCRNG_CTL_RE 0x00000002
+#define SDCRNG_CTL_DR 0x00000001
+#define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG)
+#define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG)
+#define SDCRNG_VAL_REG 0x20
+
+#define MODULE_NAME "pasemi_rng"
+
+static int pasemi_rng_data_present(struct hwrng *rng)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+
+ return (in_le32(rng_regs + SDCRNG_CTL_REG)
+ & SDCRNG_CTL_FVLD_M) ? 1 : 0;
+}
+
+static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ *data = in_le32(rng_regs + SDCRNG_VAL_REG);
+ return 4;
+}
+
+static int pasemi_rng_init(struct hwrng *rng)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ u32 ctl;
+
+ ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ;
+ out_le32(rng_regs + SDCRNG_CTL_REG, ctl);
+ out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR);
+
+ return 0;
+}
+
+static void pasemi_rng_cleanup(struct hwrng *rng)
+{
+ void __iomem *rng_regs = (void __iomem *)rng->priv;
+ u32 ctl;
+
+ ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE;
+ out_le32(rng_regs + SDCRNG_CTL_REG,
+ in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl);
+}
+
+static struct hwrng pasemi_rng = {
+ .name = MODULE_NAME,
+ .init = pasemi_rng_init,
+ .cleanup = pasemi_rng_cleanup,
+ .data_present = pasemi_rng_data_present,
+ .data_read = pasemi_rng_data_read,
+};
+
+static int __devinit rng_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ void __iomem *rng_regs;
+ struct device_node *rng_np = ofdev->node;
+ struct resource res;
+ int err = 0;
+
+ err = of_address_to_resource(rng_np, 0, &res);
+ if (err)
+ return -ENODEV;
+
+ rng_regs = ioremap(res.start, 0x100);
+
+ if (!rng_regs)
+ return -ENOMEM;
+
+ pasemi_rng.priv = (unsigned long)rng_regs;
+
+ printk(KERN_INFO "Registering PA Semi RNG\n");
+
+ err = hwrng_register(&pasemi_rng);
+
+ if (err)
+ iounmap(rng_regs);
+
+ return err;
+}
+
+static int __devexit rng_remove(struct of_device *dev)
+{
+ void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
+
+ hwrng_unregister(&pasemi_rng);
+ iounmap(rng_regs);
+
+ return 0;
+}
+
+static struct of_device_id rng_match[] = {
+ {
+ .compatible = "1682m-rng",
+ },
+ {},
+};
+
+static struct of_platform_driver rng_driver = {
+ .name = "pasemi-rng",
+ .match_table = rng_match,
+ .probe = rng_probe,
+ .remove = rng_remove,
+};
+
+static int __init rng_init(void)
+{
+ return of_register_platform_driver(&rng_driver);
+}
+module_init(rng_init);
+
+static void __exit rng_exit(void)
+{
+ of_unregister_platform_driver(&rng_driver);
+}
+module_exit(rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index a6dcb291815..b894f67fdf1 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -3,6 +3,8 @@
#
menu "IPMI"
+ depends on HAS_IOMEM
+
config IPMI_HANDLER
tristate 'IPMI top-level message handler'
help
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index c09160383a5..6e55cfb9c65 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -705,15 +705,13 @@ static int __init mmtimer_init(void)
maxn++;
/* Allocate list of node ptrs to mmtimer_t's */
- timers = kmalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL);
+ timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL);
if (timers == NULL) {
printk(KERN_ERR "%s: failed to allocate memory for device\n",
MMTIMER_NAME);
goto out3;
}
- memset(timers,0,(sizeof(mmtimer_t *)*maxn));
-
/* Allocate mmtimer_t's for each online node */
for_each_online_node(node) {
timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 27c1179ee52..f25facd97bb 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -21,6 +21,7 @@ config SYNCLINK_CS
config CARDMAN_4000
tristate "Omnikey Cardman 4000 support"
depends on PCMCIA
+ select BITREVERSE
help
Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard
reader.
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index e91b43a014b..fee58e03dbe 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
+#include <linux/bitrev.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -194,41 +195,17 @@ static inline unsigned char xinb(unsigned short port)
}
#endif
-#define b_0000 15
-#define b_0001 14
-#define b_0010 13
-#define b_0011 12
-#define b_0100 11
-#define b_0101 10
-#define b_0110 9
-#define b_0111 8
-#define b_1000 7
-#define b_1001 6
-#define b_1010 5
-#define b_1011 4
-#define b_1100 3
-#define b_1101 2
-#define b_1110 1
-#define b_1111 0
-
-static unsigned char irtab[16] = {
- b_0000, b_1000, b_0100, b_1100,
- b_0010, b_1010, b_0110, b_1110,
- b_0001, b_1001, b_0101, b_1101,
- b_0011, b_1011, b_0111, b_1111
-};
+static inline unsigned char invert_revert(unsigned char ch)
+{
+ return bitrev8(~ch);
+}
static void str_invert_revert(unsigned char *b, int len)
{
int i;
for (i = 0; i < len; i++)
- b[i] = (irtab[b[i] & 0x0f] << 4) | irtab[b[i] >> 4];
-}
-
-static unsigned char invert_revert(unsigned char ch)
-{
- return (irtab[ch & 0x0f] << 4) | irtab[ch >> 4];
+ b[i] = invert_revert(b[i]);
}
#define ATRLENCK(dev,pos) \
@@ -1114,7 +1091,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
/*
* wait for atr to become valid.
* note: it is important to lock this code. if we don't, the monitor
- * could be run between test_bit and the the call the sleep on the
+ * could be run between test_bit and the call to sleep on the
* atr-queue. if *then* the monitor detects atr valid, it will wake up
* any process on the atr-queue, *but* since we have been interrupted,
* we do not yet sleep on this queue. this would result in a missed
@@ -1881,8 +1858,11 @@ static int cm4000_probe(struct pcmcia_device *link)
init_waitqueue_head(&dev->readq);
ret = cm4000_config(link, i);
- if (ret)
+ if (ret) {
+ dev_table[i] = NULL;
+ kfree(dev);
return ret;
+ }
class_device_create(cmm_class, NULL, MKDEV(major, i), NULL,
"cmm%d", i);
@@ -1907,7 +1887,7 @@ static void cm4000_detach(struct pcmcia_device *link)
cm4000_release(link);
dev_table[devno] = NULL;
- kfree(dev);
+ kfree(dev);
class_device_destroy(cmm_class, MKDEV(major, devno));
@@ -1956,12 +1936,14 @@ static int __init cmm_init(void)
if (major < 0) {
printk(KERN_WARNING MODULE_NAME
": could not get major number\n");
+ class_destroy(cmm_class);
return major;
}
rc = pcmcia_register_driver(&cm4000_driver);
if (rc < 0) {
unregister_chrdev(major, DEVICE_NAME);
+ class_destroy(cmm_class);
return rc;
}
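The rewrite above works because the old irtab[] lookup computed the bit-reversed complement of each byte, which is exactly bitrev8(~ch). A standalone check (user space, with an open-coded bit reverse standing in for the kernel's bitrev8()):

/* Standalone sanity check of the invert_revert() rewrite above. */
#include <stdio.h>

static unsigned char bitrev8_sketch(unsigned char b)
{
	unsigned char r = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (b & (1u << i))
			r |= 1u << (7 - i);
	return r;
}

int main(void)
{
	/* 0x35 -> complement 0xca -> bit-reversed 0x53 */
	unsigned char ch = 0x35;

	printf("invert_revert(0x%02x) = 0x%02x\n",
	       ch, bitrev8_sketch(~ch & 0xff));
	return 0;
}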
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index f2e4ec4fd40..af88181a17f 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -636,8 +636,11 @@ static int reader_probe(struct pcmcia_device *link)
setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
ret = reader_config(link, i);
- if (ret)
+ if (ret) {
+ dev_table[i] = NULL;
+ kfree(dev);
return ret;
+ }
class_device_create(cmx_class, NULL, MKDEV(major, i), NULL,
"cmx%d", i);
@@ -708,12 +711,14 @@ static int __init cm4040_init(void)
if (major < 0) {
printk(KERN_WARNING MODULE_NAME
": could not get major number\n");
+ class_destroy(cmx_class);
return major;
}
rc = pcmcia_register_driver(&reader_driver);
if (rc < 0) {
unregister_chrdev(major, DEVICE_NAME);
+ class_destroy(cmx_class);
return rc;
}
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fe00c7dfb64..dc4e1ff7f56 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -3,6 +3,7 @@
#
menu "TPM devices"
+ depends on HAS_IOMEM
config TCG_TPM
tristate "TPM Hardware Support"
@@ -33,7 +34,7 @@ config TCG_NSC
tristate "National Semiconductor TPM Interface"
depends on TCG_TPM && PNPACPI
---help---
- If you have a TPM security chip from National Semicondutor
+ If you have a TPM security chip from National Semiconductor
say Yes and it will be accessible from within Linux. To
compile this driver as a module, choose M here; the module
will be called tpm_nsc.
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 7710a6a77d9..fc662e4ce58 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -934,13 +934,6 @@ restart:
return -EINVAL;
/*
- * No more input please, we are switching. The new ldisc
- * will update this value in the ldisc open function
- */
-
- tty->receive_room = 0;
-
- /*
* Problem: What do we do if this blocks ?
*/
@@ -951,6 +944,13 @@ restart:
return 0;
}
+ /*
+ * No more input please, we are switching. The new ldisc
+ * will update this value in the ldisc open function
+ */
+
+ tty->receive_room = 0;
+
o_ldisc = tty->ldisc;
o_tty = tty->link;
@@ -1573,11 +1573,11 @@ void no_tty(void)
/**
- * stop_tty - propogate flow control
+ * stop_tty - propagate flow control
* @tty: tty to stop
*
* Perform flow control to the driver. For PTY/TTY pairs we
- * must also propogate the TIOCKPKT status. May be called
+ * must also propagate the TIOCPKT status. May be called
* on an already stopped device and will not re-call the driver
* method.
*
@@ -1607,11 +1607,11 @@ void stop_tty(struct tty_struct *tty)
EXPORT_SYMBOL(stop_tty);
/**
- * start_tty - propogate flow control
+ * start_tty - propagate flow control
* @tty: tty to start
*
* Start a tty that has been stopped if at all possible. Perform
- * any neccessary wakeups and propogate the TIOCPKT status. If this
+ * any necessary wakeups and propagate the TIOCPKT status. If the
* tty was previously stopped and is being started, then the
* driver start method is invoked and the line discipline is woken.
*
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 893dbaf386f..eb37fba9b7e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1685,9 +1685,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
if (sys_dev) {
switch (action) {
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
cpufreq_add_dev(sys_dev);
break;
case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
@@ -1699,6 +1701,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
__cpufreq_remove_dev(sys_dev);
break;
case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
cpufreq_add_dev(sys_dev);
break;
}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index d1c7cac9316..d2f0cbd8b8f 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -313,9 +313,11 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
cpufreq_update_policy(cpu);
break;
case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
break;
}
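The _FROZEN action codes added above are emitted for CPUs taken down and brought back during suspend/resume; a callback that must behave the same in both paths simply stacks the case labels. A minimal sketch (handler names hypothetical):

/* Hedged sketch of the stacked-case hotplug pattern used above. */
#include <linux/cpu.h>
#include <linux/notifier.h>

static void my_cpu_add(unsigned int cpu) { /* set up per-cpu state */ }
static void my_cpu_remove(unsigned int cpu) { /* tear it down */ }

static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN: /* same work when resuming */
		my_cpu_add(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		my_cpu_remove(cpu);
		break;
	}
	return NOTIFY_OK;
}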
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f21fe66c9ee..e678a33ea67 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,9 +51,31 @@ config CRYPTO_DEV_GEODE
default m
help
Say 'Y' here to use the AMD Geode LX processor on-board AES
- engine for the CryptoAPI AES alogrithm.
+ engine for the CryptoAPI AES algorithm.
To compile this driver as a module, choose M here: the module
will be called geode-aes.
+config ZCRYPT
+ tristate "Support for PCI-attached cryptographic adapters"
+ depends on S390
+ select ZCRYPT_MONOLITHIC if ZCRYPT="y"
+ default "m"
+ help
+ Select this option if you want to use a PCI-attached cryptographic
+ adapter like:
+ + PCI Cryptographic Accelerator (PCICA)
+ + PCI Cryptographic Coprocessor (PCICC)
+ + PCI-X Cryptographic Coprocessor (PCIXCC)
+ + Crypto Express2 Coprocessor (CEX2C)
+ + Crypto Express2 Accelerator (CEX2A)
+
+config ZCRYPT_MONOLITHIC
+ bool "Monolithic zcrypt module"
+ depends on ZCRYPT="m"
+ help
+ Select this option if you want to have a single module z90crypt.ko
+ that contains all parts of the crypto device driver (ap bus,
+ request router and all the card drivers).
+
endmenu
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 30d021d1a07..72be6c63edf 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -3,6 +3,7 @@
#
menu "DMA Engine support"
+ depends on !S390
config DMA_ENGINE
bool "Support for DMA engines"
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 4f0898400c6..807c402df04 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,6 +7,7 @@
#
menu 'EDAC - error detection and reporting (RAS) (EXPERIMENTAL)'
+ depends on HAS_IOMEM
config EDAC
tristate "EDAC core system error reporting (EXPERIMENTAL)"
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
new file mode 100644
index 00000000000..5932c72f9e4
--- /dev/null
+++ b/drivers/firewire/Kconfig
@@ -0,0 +1,61 @@
+# -*- shell-script -*-
+
+comment "An alternative FireWire stack is available with EXPERIMENTAL=y"
+ depends on EXPERIMENTAL=n
+
+config FIREWIRE
+ tristate "IEEE 1394 (FireWire) support (JUJU alternative stack, experimental)"
+ depends on EXPERIMENTAL
+ select CRC_ITU_T
+ help
+ IEEE 1394 describes a high performance serial bus, which is also
+ known as FireWire(tm) or i.Link(tm) and is used for connecting all
+ sorts of devices (most notably digital video cameras) to your
+ computer.
+
+ If you have FireWire hardware and want to use it, say Y here. This
+ is the core support only, you will also need to select a driver for
+ your IEEE 1394 adapter.
+
+ To compile this driver as a module, say M here: the module will be
+ called fw-core.
+
+ This is the "JUJU" FireWire stack, an alternative implementation
+ designed for robustness and simplicity. You can build either this
+ stack, or the classic stack (the ieee1394 driver, ohci1394 etc.)
+ or both.
+
+config FIREWIRE_OHCI
+ tristate "Support for OHCI FireWire host controllers"
+ depends on PCI && FIREWIRE
+ help
+ Enable this driver if you have a FireWire controller based
+ on the OHCI specification. For all practical purposes, this
+ is the only chipset in use, so say Y here.
+
+ To compile this driver as a module, say M here: The module will be
+ called fw-ohci.
+
+ If you also build ohci1394 of the classic IEEE 1394 driver stack,
+ blacklist either ohci1394 or fw-ohci to let hotplug load the desired
+ driver.
+
+config FIREWIRE_SBP2
+ tristate "Support for storage devices (SBP-2 protocol driver)"
+ depends on FIREWIRE && SCSI
+ help
+ This option enables you to use SBP-2 devices connected to a
+ FireWire bus. SBP-2 devices include storage devices like
+ harddisks and DVD drives, also some other FireWire devices
+ like scanners.
+
+ To compile this driver as a module, say M here: The module will be
+ called fw-sbp2.
+
+ You should also enable support for disks, CD-ROMs, etc. in the SCSI
+ configuration section.
+
+ If you also build sbp2 of the classic IEEE 1394 driver stack,
+ blacklist either sbp2 or fw-sbp2 to let hotplug load the desired
+ driver.
+
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
new file mode 100644
index 00000000000..fc7d59d4bce
--- /dev/null
+++ b/drivers/firewire/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux IEEE 1394 implementation
+#
+
+fw-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
+ fw-device.o fw-cdev.o
+
+obj-$(CONFIG_FIREWIRE) += fw-core.o
+obj-$(CONFIG_FIREWIRE_OHCI) += fw-ohci.o
+obj-$(CONFIG_FIREWIRE_SBP2) += fw-sbp2.o
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
new file mode 100644
index 00000000000..636151a64ad
--- /dev/null
+++ b/drivers/firewire/fw-card.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/crc-itu-t.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+int fw_compute_block_crc(u32 *block)
+{
+ __be32 be32_block[256];
+ int i, length;
+
+ length = (*block >> 16) & 0xff;
+ for (i = 0; i < length; i++)
+ be32_block[i] = cpu_to_be32(block[i + 1]);
+ *block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
+
+ return length;
+}
+
+static DEFINE_MUTEX(card_mutex);
+static LIST_HEAD(card_list);
+
+static LIST_HEAD(descriptor_list);
+static int descriptor_count;
+
+#define BIB_CRC(v) ((v) << 0)
+#define BIB_CRC_LENGTH(v) ((v) << 16)
+#define BIB_INFO_LENGTH(v) ((v) << 24)
+
+#define BIB_LINK_SPEED(v) ((v) << 0)
+#define BIB_GENERATION(v) ((v) << 4)
+#define BIB_MAX_ROM(v) ((v) << 8)
+#define BIB_MAX_RECEIVE(v) ((v) << 12)
+#define BIB_CYC_CLK_ACC(v) ((v) << 16)
+#define BIB_PMC ((1) << 27)
+#define BIB_BMC ((1) << 28)
+#define BIB_ISC ((1) << 29)
+#define BIB_CMC ((1) << 30)
+#define BIB_IMC ((1) << 31)
+
+static u32 *
+generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+{
+ struct fw_descriptor *desc;
+ static u32 config_rom[256];
+ int i, j, length;
+
+ /*
+ * Initialize contents of config rom buffer. On the OHCI
+ * controller, block reads to the config rom access host
+ * memory, but quadlet reads access the hardware bus info block
+ * registers. That's just crack, but it means we should make
+ * sure the contents of the bus info block in host memory match
+ * the version stored in the OHCI registers.
+ */
+
+ memset(config_rom, 0, sizeof(config_rom));
+ config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
+ config_rom[1] = 0x31333934;
+
+ config_rom[2] =
+ BIB_LINK_SPEED(card->link_speed) |
+ BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
+ BIB_MAX_ROM(2) |
+ BIB_MAX_RECEIVE(card->max_receive) |
+ BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
+ config_rom[3] = card->guid >> 32;
+ config_rom[4] = card->guid;
+
+ /* Generate root directory. */
+ i = 5;
+ config_rom[i++] = 0;
+ config_rom[i++] = 0x0c0083c0; /* node capabilities */
+ j = i + descriptor_count;
+
+ /* Generate root directory entries for descriptors. */
+ list_for_each_entry (desc, &descriptor_list, link) {
+ if (desc->immediate > 0)
+ config_rom[i++] = desc->immediate;
+ config_rom[i] = desc->key | (j - i);
+ i++;
+ j += desc->length;
+ }
+
+ /* Update root directory length. */
+ config_rom[5] = (i - 5 - 1) << 16;
+
+ /* End of root directory, now copy in descriptors. */
+ list_for_each_entry (desc, &descriptor_list, link) {
+ memcpy(&config_rom[i], desc->data, desc->length * 4);
+ i += desc->length;
+ }
+
+ /* Calculate CRCs for all blocks in the config rom. This
+ * assumes that CRC length and info length are identical for
+ * the bus info block, which is always the case for this
+ * implementation. */
+ for (i = 0; i < j; i += length + 1)
+ length = fw_compute_block_crc(config_rom + i);
+
+ *config_rom_length = j;
+
+ return config_rom;
+}
+
+static void
+update_config_roms(void)
+{
+ struct fw_card *card;
+ u32 *config_rom;
+ size_t length;
+
+ list_for_each_entry (card, &card_list, link) {
+ config_rom = generate_config_rom(card, &length);
+ card->driver->set_config_rom(card, config_rom, length);
+ }
+}
+
+int
+fw_core_add_descriptor(struct fw_descriptor *desc)
+{
+ size_t i;
+
+ /*
+ * Check that the descriptor is valid; the lengths of all blocks in
+ * the descriptor have to add up to exactly the length of the
+ * block.
+ */
+ i = 0;
+ while (i < desc->length)
+ i += (desc->data[i] >> 16) + 1;
+
+ if (i != desc->length)
+ return -EINVAL;
+
+ mutex_lock(&card_mutex);
+
+ list_add_tail(&desc->link, &descriptor_list);
+ descriptor_count++;
+ if (desc->immediate > 0)
+ descriptor_count++;
+ update_config_roms();
+
+ mutex_unlock(&card_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(fw_core_add_descriptor);
+
+void
+fw_core_remove_descriptor(struct fw_descriptor *desc)
+{
+ mutex_lock(&card_mutex);
+
+ list_del(&desc->link);
+ descriptor_count--;
+ if (desc->immediate > 0)
+ descriptor_count--;
+ update_config_roms();
+
+ mutex_unlock(&card_mutex);
+}
+EXPORT_SYMBOL(fw_core_remove_descriptor);
+
+static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+};
+
+struct bm_data {
+ struct fw_transaction t;
+ struct {
+ __be32 arg;
+ __be32 data;
+ } lock;
+ u32 old;
+ int rcode;
+ struct completion done;
+};
+
+static void
+complete_bm_lock(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct bm_data *bmd = data;
+
+ if (rcode == RCODE_COMPLETE)
+ bmd->old = be32_to_cpu(*(__be32 *) payload);
+ bmd->rcode = rcode;
+ complete(&bmd->done);
+}
+
+static void
+fw_card_bm_work(struct work_struct *work)
+{
+ struct fw_card *card = container_of(work, struct fw_card, work.work);
+ struct fw_device *root;
+ struct bm_data bmd;
+ unsigned long flags;
+ int root_id, new_root_id, irm_id, gap_count, generation, grace;
+ int do_reset = 0;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ generation = card->generation;
+ root = card->root_node->data;
+ root_id = card->root_node->node_id;
+ grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
+
+ if (card->bm_generation + 1 == generation ||
+ (card->bm_generation != generation && grace)) {
+ /*
+ * This first step is to figure out who is IRM and
+ * then try to become bus manager. If the IRM is not
+ * well defined (e.g. does not have an active link
+ * layer or does not respond to our lock request), we
+ * will have to do a little vigilante bus management.
+ * In that case, we do a goto into the gap count logic
+ * so that when we do the reset, we still optimize the
+ * gap count. That could well save a reset in the
+ * next generation.
+ */
+
+ irm_id = card->irm_node->node_id;
+ if (!card->irm_node->link_on) {
+ new_root_id = card->local_node->node_id;
+ fw_notify("IRM has link off, making local node (%02x) root.\n",
+ new_root_id);
+ goto pick_me;
+ }
+
+ bmd.lock.arg = cpu_to_be32(0x3f);
+ bmd.lock.data = cpu_to_be32(card->local_node->node_id);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ init_completion(&bmd.done);
+ fw_send_request(card, &bmd.t, TCODE_LOCK_COMPARE_SWAP,
+ irm_id, generation,
+ SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
+ &bmd.lock, sizeof(bmd.lock),
+ complete_bm_lock, &bmd);
+ wait_for_completion(&bmd.done);
+
+ if (bmd.rcode == RCODE_GENERATION) {
+ /*
+ * Another bus reset happened. Just return,
+ * the BM work has been rescheduled.
+ */
+ return;
+ }
+
+ if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f)
+ /* Somebody else is BM, let them do the work. */
+ return;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (bmd.rcode != RCODE_COMPLETE) {
+ /*
+ * The lock request failed, maybe the IRM
+ * isn't really IRM capable after all. Let's
+ * do a bus reset and pick the local node as
+ * root, and thus, IRM.
+ */
+ new_root_id = card->local_node->node_id;
+ fw_notify("BM lock failed, making local node (%02x) root.\n",
+ new_root_id);
+ goto pick_me;
+ }
+ } else if (card->bm_generation != generation) {
+ /*
+ * OK, we weren't BM in the last generation, and it's
+ * less than 100ms since last bus reset. Reschedule
+ * this task 100ms from now.
+ */
+ spin_unlock_irqrestore(&card->lock, flags);
+ schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
+ return;
+ }
+
+ /*
+ * We're bus manager for this generation, so next step is to
+ * make sure we have an active cycle master and do gap count
+ * optimization.
+ */
+ card->bm_generation = generation;
+
+ if (root == NULL) {
+ /*
+ * Either link_on is false, or we failed to read the
+ * config rom. In either case, pick another root.
+ */
+ new_root_id = card->local_node->node_id;
+ } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
+ /*
+ * If we haven't probed this device yet, bail out now
+ * and let's try again once that's done.
+ */
+ spin_unlock_irqrestore(&card->lock, flags);
+ return;
+ } else if (root->config_rom[2] & BIB_CMC) {
+ /*
+ * FIXME: I suppose we should set the cmstr bit in the
+ * STATE_CLEAR register of this node, as described in
+ * 1394-1995, 8.4.2.6. Also, send out a force root
+ * packet for this node.
+ */
+ new_root_id = root_id;
+ } else {
+ /*
+ * Current root has an active link layer and we
+ * successfully read the config rom, but it's not
+ * cycle master capable.
+ */
+ new_root_id = card->local_node->node_id;
+ }
+
+ pick_me:
+ /* Now figure out what gap count to set. */
+ if (card->topology_type == FW_TOPOLOGY_A &&
+ card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
+ gap_count = gap_count_table[card->root_node->max_hops];
+ else
+ gap_count = 63;
+
+ /*
+ * Finally, figure out if we should do a reset or not. If we've
+ * done less than 5 resets with the same physical topology and we
+ * have either a new root or a new gap count setting, let's do it.
+ */
+
+ if (card->bm_retries++ < 5 &&
+ (card->gap_count != gap_count || new_root_id != root_id))
+ do_reset = 1;
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (do_reset) {
+ fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
+ card->index, new_root_id, gap_count);
+ fw_send_phy_config(card, new_root_id, generation, gap_count);
+ fw_core_initiate_bus_reset(card, 1);
+ }
+}
+
+static void
+flush_timer_callback(unsigned long data)
+{
+ struct fw_card *card = (struct fw_card *)data;
+
+ fw_flush_transactions(card);
+}
+
+void
+fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
+ struct device *device)
+{
+ static atomic_t index = ATOMIC_INIT(-1);
+
+ kref_init(&card->kref);
+ card->index = atomic_inc_return(&index);
+ card->driver = driver;
+ card->device = device;
+ card->current_tlabel = 0;
+ card->tlabel_mask = 0;
+ card->color = 0;
+
+ INIT_LIST_HEAD(&card->transaction_list);
+ spin_lock_init(&card->lock);
+ setup_timer(&card->flush_timer,
+ flush_timer_callback, (unsigned long)card);
+
+ card->local_node = NULL;
+
+ INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
+}
+EXPORT_SYMBOL(fw_card_initialize);
+
+int
+fw_card_add(struct fw_card *card,
+ u32 max_receive, u32 link_speed, u64 guid)
+{
+ u32 *config_rom;
+ size_t length;
+
+ card->max_receive = max_receive;
+ card->link_speed = link_speed;
+ card->guid = guid;
+
+ /* Activate link_on bit and contender bit in our self ID packets. */
+ if (card->driver->update_phy_reg(card, 4, 0,
+ PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
+ return -EIO;
+
+ /*
+ * The subsystem grabs a reference when the card is added and
+ * drops it when the driver calls fw_core_remove_card.
+ */
+ fw_card_get(card);
+
+ mutex_lock(&card_mutex);
+ config_rom = generate_config_rom(card, &length);
+ list_add_tail(&card->link, &card_list);
+ mutex_unlock(&card_mutex);
+
+ return card->driver->enable(card, config_rom, length);
+}
+EXPORT_SYMBOL(fw_card_add);
+
+
+/*
+ * The next few functions implement a dummy driver that is used once a
+ * card driver shuts down an fw_card. This allows the driver to
+ * cleanly unload, as all IO to the card will be handled by the dummy
+ * driver instead of calling into the (possibly) unloaded module. The
+ * dummy driver just fails all IO.
+ */
+
+static int
+dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+ BUG();
+ return -1;
+}
+
+static int
+dummy_update_phy_reg(struct fw_card *card, int address,
+ int clear_bits, int set_bits)
+{
+ return -ENODEV;
+}
+
+static int
+dummy_set_config_rom(struct fw_card *card,
+ u32 *config_rom, size_t length)
+{
+ /*
+ * We take the card out of card_list before setting the dummy
+ * driver, so this should never get called.
+ */
+ BUG();
+ return -1;
+}
+
+static void
+dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+ packet->callback(packet, card, -ENODEV);
+}
+
+static void
+dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+ packet->callback(packet, card, -ENODEV);
+}
+
+static int
+dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+ return -ENOENT;
+}
+
+static int
+dummy_enable_phys_dma(struct fw_card *card,
+ int node_id, int generation)
+{
+ return -ENODEV;
+}
+
+static struct fw_card_driver dummy_driver = {
+ .name = "dummy",
+ .enable = dummy_enable,
+ .update_phy_reg = dummy_update_phy_reg,
+ .set_config_rom = dummy_set_config_rom,
+ .send_request = dummy_send_request,
+ .cancel_packet = dummy_cancel_packet,
+ .send_response = dummy_send_response,
+ .enable_phys_dma = dummy_enable_phys_dma,
+};
+
+void
+fw_core_remove_card(struct fw_card *card)
+{
+ card->driver->update_phy_reg(card, 4,
+ PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+ fw_core_initiate_bus_reset(card, 1);
+
+ mutex_lock(&card_mutex);
+ list_del(&card->link);
+ mutex_unlock(&card_mutex);
+
+ /* Set up the dummy driver. */
+ card->driver = &dummy_driver;
+
+ fw_flush_transactions(card);
+
+ fw_destroy_nodes(card);
+
+ fw_card_put(card);
+}
+EXPORT_SYMBOL(fw_core_remove_card);
+
+struct fw_card *
+fw_card_get(struct fw_card *card)
+{
+ kref_get(&card->kref);
+
+ return card;
+}
+EXPORT_SYMBOL(fw_card_get);
+
+static void
+release_card(struct kref *kref)
+{
+ struct fw_card *card = container_of(kref, struct fw_card, kref);
+
+ kfree(card);
+}
+
+/*
+ * An assumption for fw_card_put() is that the card driver allocates
+ * the fw_card struct with kmalloc and that it has been shut down
+ * before the last ref is dropped.
+ */
+void
+fw_card_put(struct fw_card *card)
+{
+ kref_put(&card->kref, release_card);
+}
+EXPORT_SYMBOL(fw_card_put);
+
+int
+fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+{
+ int reg = short_reset ? 5 : 1;
+ int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+
+ return card->driver->update_phy_reg(card, reg, 0, bit);
+}
+EXPORT_SYMBOL(fw_core_initiate_bus_reset);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
new file mode 100644
index 00000000000..0fa5bd54c6a
--- /dev/null
+++ b/drivers/firewire/fw-cdev.c
@@ -0,0 +1,961 @@
+/*
+ * Char device for device raw access
+ *
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/idr.h>
+#include <linux/compat.h>
+#include <linux/firewire-cdev.h>
+#include <asm/uaccess.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+struct client;
+struct client_resource {
+ struct list_head link;
+ void (*release)(struct client *client, struct client_resource *r);
+ u32 handle;
+};
+
+/*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+ * the first field in a concrete event struct; bus_reset, response and
+ * iso_interrupt below all embed struct event first for this reason.
+ */
+
+struct event {
+ struct { void *data; size_t size; } v[2];
+ struct list_head link;
+};
+
+struct bus_reset {
+ struct event event;
+ struct fw_cdev_event_bus_reset reset;
+};
+
+struct response {
+ struct event event;
+ struct fw_transaction transaction;
+ struct client *client;
+ struct client_resource resource;
+ struct fw_cdev_event_response response;
+};
+
+struct iso_interrupt {
+ struct event event;
+ struct fw_cdev_event_iso_interrupt interrupt;
+};
+
+struct client {
+ u32 version;
+ struct fw_device *device;
+ spinlock_t lock;
+ u32 resource_handle;
+ struct list_head resource_list;
+ struct list_head event_list;
+ wait_queue_head_t wait;
+ u64 bus_reset_closure;
+
+ struct fw_iso_context *iso_context;
+ u64 iso_closure;
+ struct fw_iso_buffer buffer;
+ unsigned long vm_start;
+
+ struct list_head link;
+};
+
+static inline void __user *
+u64_to_uptr(__u64 value)
+{
+ return (void __user *)(unsigned long)value;
+}
+
+static inline __u64
+uptr_to_u64(void __user *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+static int fw_device_op_open(struct inode *inode, struct file *file)
+{
+ struct fw_device *device;
+ struct client *client;
+ unsigned long flags;
+
+ device = fw_device_from_devt(inode->i_rdev);
+ if (device == NULL)
+ return -ENODEV;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (client == NULL)
+ return -ENOMEM;
+
+ client->device = fw_device_get(device);
+ INIT_LIST_HEAD(&client->event_list);
+ INIT_LIST_HEAD(&client->resource_list);
+ spin_lock_init(&client->lock);
+ init_waitqueue_head(&client->wait);
+
+ file->private_data = client;
+
+ spin_lock_irqsave(&device->card->lock, flags);
+ list_add_tail(&client->link, &device->client_list);
+ spin_unlock_irqrestore(&device->card->lock, flags);
+
+ return 0;
+}
+
+static void queue_event(struct client *client, struct event *event,
+ void *data0, size_t size0, void *data1, size_t size1)
+{
+ unsigned long flags;
+
+ event->v[0].data = data0;
+ event->v[0].size = size0;
+ event->v[1].data = data1;
+ event->v[1].size = size1;
+
+ spin_lock_irqsave(&client->lock, flags);
+
+ list_add_tail(&event->link, &client->event_list);
+ wake_up_interruptible(&client->wait);
+
+ spin_unlock_irqrestore(&client->lock, flags);
+}
+
+static int
+dequeue_event(struct client *client, char __user *buffer, size_t count)
+{
+ unsigned long flags;
+ struct event *event;
+ size_t size, total;
+ int i, retval;
+
+ retval = wait_event_interruptible(client->wait,
+ !list_empty(&client->event_list) ||
+ fw_device_is_shutdown(client->device));
+ if (retval < 0)
+ return retval;
+
+ if (list_empty(&client->event_list) &&
+ fw_device_is_shutdown(client->device))
+ return -ENODEV;
+
+ spin_lock_irqsave(&client->lock, flags);
+ event = container_of(client->event_list.next, struct event, link);
+ list_del(&event->link);
+ spin_unlock_irqrestore(&client->lock, flags);
+
+ total = 0;
+ for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
+ size = min(event->v[i].size, count - total);
+ if (copy_to_user(buffer + total, event->v[i].data, size)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ total += size;
+ }
+ retval = total;
+
+ out:
+ kfree(event);
+
+ return retval;
+}
+
+static ssize_t
+fw_device_op_read(struct file *file,
+ char __user *buffer, size_t count, loff_t *offset)
+{
+ struct client *client = file->private_data;
+
+ return dequeue_event(client, buffer, count);
+}
+
+static void
+fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+ struct client *client)
+{
+ struct fw_card *card = client->device->card;
+
+ event->closure = client->bus_reset_closure;
+ event->type = FW_CDEV_EVENT_BUS_RESET;
+ event->node_id = client->device->node_id;
+ event->local_node_id = card->local_node->node_id;
+ event->bm_node_id = 0; /* FIXME: We don't track the BM. */
+ event->irm_node_id = card->irm_node->node_id;
+ event->root_node_id = card->root_node->node_id;
+ event->generation = card->generation;
+}
+
+static void
+for_each_client(struct fw_device *device,
+ void (*callback)(struct client *client))
+{
+ struct fw_card *card = device->card;
+ struct client *c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ list_for_each_entry(c, &device->client_list, link)
+ callback(c);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void
+queue_bus_reset_event(struct client *client)
+{
+ struct bus_reset *bus_reset;
+
+ bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
+ if (bus_reset == NULL) {
+ fw_notify("Out of memory when allocating bus reset event\n");
+ return;
+ }
+
+ fill_bus_reset_event(&bus_reset->reset, client);
+
+ queue_event(client, &bus_reset->event,
+ &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+}
+
+void fw_device_cdev_update(struct fw_device *device)
+{
+ for_each_client(device, queue_bus_reset_event);
+}
+
+static void wake_up_client(struct client *client)
+{
+ wake_up_interruptible(&client->wait);
+}
+
+void fw_device_cdev_remove(struct fw_device *device)
+{
+ for_each_client(device, wake_up_client);
+}
+
+static int ioctl_get_info(struct client *client, void *buffer)
+{
+ struct fw_cdev_get_info *get_info = buffer;
+ struct fw_cdev_event_bus_reset bus_reset;
+
+ client->version = get_info->version;
+ get_info->version = FW_CDEV_VERSION;
+
+ if (get_info->rom != 0) {
+ void __user *uptr = u64_to_uptr(get_info->rom);
+ size_t want = get_info->rom_length;
+ size_t have = client->device->config_rom_length * 4;
+
+ if (copy_to_user(uptr, client->device->config_rom,
+ min(want, have)))
+ return -EFAULT;
+ }
+ get_info->rom_length = client->device->config_rom_length * 4;
+
+ client->bus_reset_closure = get_info->bus_reset_closure;
+ if (get_info->bus_reset != 0) {
+ void __user *uptr = u64_to_uptr(get_info->bus_reset);
+
+ fill_bus_reset_event(&bus_reset, client);
+ if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+ return -EFAULT;
+ }
+
+ get_info->card = client->device->card->index;
+
+ return 0;
+}
+
+static void
+add_client_resource(struct client *client, struct client_resource *resource)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&client->lock, flags);
+ list_add_tail(&resource->link, &client->resource_list);
+ resource->handle = client->resource_handle++;
+ spin_unlock_irqrestore(&client->lock, flags);
+}
+
+static int
+release_client_resource(struct client *client, u32 handle,
+ struct client_resource **resource)
+{
+ struct client_resource *r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&client->lock, flags);
+ list_for_each_entry(r, &client->resource_list, link) {
+ if (r->handle == handle) {
+ list_del(&r->link);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&client->lock, flags);
+
+ if (&r->link == &client->resource_list)
+ return -EINVAL;
+
+ if (resource)
+ *resource = r;
+ else
+ r->release(client, r);
+
+ return 0;
+}
+
+static void
+release_transaction(struct client *client, struct client_resource *resource)
+{
+ struct response *response =
+ container_of(resource, struct response, resource);
+
+ fw_cancel_transaction(client->device->card, &response->transaction);
+}
+
+static void
+complete_transaction(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct response *response = data;
+ struct client *client = response->client;
+ unsigned long flags;
+
+ if (length < response->response.length)
+ response->response.length = length;
+ if (rcode == RCODE_COMPLETE)
+ memcpy(response->response.data, payload,
+ response->response.length);
+
+ spin_lock_irqsave(&client->lock, flags);
+ list_del(&response->resource.link);
+ spin_unlock_irqrestore(&client->lock, flags);
+
+ response->response.type = FW_CDEV_EVENT_RESPONSE;
+ response->response.rcode = rcode;
+ queue_event(client, &response->event,
+ &response->response, sizeof(response->response),
+ response->response.data, response->response.length);
+}
+
+static ssize_t ioctl_send_request(struct client *client, void *buffer)
+{
+ struct fw_device *device = client->device;
+ struct fw_cdev_send_request *request = buffer;
+ struct response *response;
+
+ /* What is the biggest size we'll accept, really? */
+ if (request->length > 4096)
+ return -EINVAL;
+
+ response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
+ if (response == NULL)
+ return -ENOMEM;
+
+ response->client = client;
+ response->response.length = request->length;
+ response->response.closure = request->closure;
+
+ if (request->data &&
+ copy_from_user(response->response.data,
+ u64_to_uptr(request->data), request->length)) {
+ kfree(response);
+ return -EFAULT;
+ }
+
+ response->resource.release = release_transaction;
+ add_client_resource(client, &response->resource);
+
+ fw_send_request(device->card, &response->transaction,
+ request->tcode & 0x1f,
+ device->node->node_id,
+ request->generation,
+ device->node->max_speed,
+ request->offset,
+ response->response.data, request->length,
+ complete_transaction, response);
+
+ if (request->data)
+ return sizeof(*request) + request->length;
+ else
+ return sizeof(*request);
+}
+
+struct address_handler {
+ struct fw_address_handler handler;
+ __u64 closure;
+ struct client *client;
+ struct client_resource resource;
+};
+
+struct request {
+ struct fw_request *request;
+ void *data;
+ size_t length;
+ struct client_resource resource;
+};
+
+struct request_event {
+ struct event event;
+ struct fw_cdev_event_request request;
+};
+
+static void
+release_request(struct client *client, struct client_resource *resource)
+{
+ struct request *request =
+ container_of(resource, struct request, resource);
+
+ fw_send_response(client->device->card, request->request,
+ RCODE_CONFLICT_ERROR);
+ kfree(request);
+}
+
+static void
+handle_request(struct fw_card *card, struct fw_request *r,
+ int tcode, int destination, int source,
+ int generation, int speed,
+ unsigned long long offset,
+ void *payload, size_t length, void *callback_data)
+{
+ struct address_handler *handler = callback_data;
+ struct request *request;
+ struct request_event *e;
+ struct client *client = handler->client;
+
+ request = kmalloc(sizeof(*request), GFP_ATOMIC);
+ e = kmalloc(sizeof(*e), GFP_ATOMIC);
+ if (request == NULL || e == NULL) {
+ kfree(request);
+ kfree(e);
+ fw_send_response(card, r, RCODE_CONFLICT_ERROR);
+ return;
+ }
+
+ request->request = r;
+ request->data = payload;
+ request->length = length;
+
+ request->resource.release = release_request;
+ add_client_resource(client, &request->resource);
+
+ e->request.type = FW_CDEV_EVENT_REQUEST;
+ e->request.tcode = tcode;
+ e->request.offset = offset;
+ e->request.length = length;
+ e->request.handle = request->resource.handle;
+ e->request.closure = handler->closure;
+
+ queue_event(client, &e->event,
+ &e->request, sizeof(e->request), payload, length);
+}
+
+static void
+release_address_handler(struct client *client,
+ struct client_resource *resource)
+{
+ struct address_handler *handler =
+ container_of(resource, struct address_handler, resource);
+
+ fw_core_remove_address_handler(&handler->handler);
+ kfree(handler);
+}
+
+static int ioctl_allocate(struct client *client, void *buffer)
+{
+ struct fw_cdev_allocate *request = buffer;
+ struct address_handler *handler;
+ struct fw_address_region region;
+
+ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (handler == NULL)
+ return -ENOMEM;
+
+ region.start = request->offset;
+ region.end = request->offset + request->length;
+ handler->handler.length = request->length;
+ handler->handler.address_callback = handle_request;
+ handler->handler.callback_data = handler;
+ handler->closure = request->closure;
+ handler->client = client;
+
+ if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
+ kfree(handler);
+ return -EBUSY;
+ }
+
+ handler->resource.release = release_address_handler;
+ add_client_resource(client, &handler->resource);
+ request->handle = handler->resource.handle;
+
+ return 0;
+}
+
+static int ioctl_deallocate(struct client *client, void *buffer)
+{
+ struct fw_cdev_deallocate *request = buffer;
+
+ return release_client_resource(client, request->handle, NULL);
+}
+
+static int ioctl_send_response(struct client *client, void *buffer)
+{
+ struct fw_cdev_send_response *request = buffer;
+ struct client_resource *resource;
+ struct request *r;
+
+ if (release_client_resource(client, request->handle, &resource) < 0)
+ return -EINVAL;
+ r = container_of(resource, struct request, resource);
+ if (request->length < r->length)
+ r->length = request->length;
+ if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
+ return -EFAULT;
+
+ fw_send_response(client->device->card, r->request, request->rcode);
+ kfree(r);
+
+ return 0;
+}
+
+static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
+{
+ struct fw_cdev_initiate_bus_reset *request = buffer;
+ int short_reset;
+
+ short_reset = (request->type == FW_CDEV_SHORT_RESET);
+
+ return fw_core_initiate_bus_reset(client->device->card, short_reset);
+}
+
+struct descriptor {
+ struct fw_descriptor d;
+ struct client_resource resource;
+ u32 data[0];
+};
+
+static void release_descriptor(struct client *client,
+ struct client_resource *resource)
+{
+ struct descriptor *descriptor =
+ container_of(resource, struct descriptor, resource);
+
+ fw_core_remove_descriptor(&descriptor->d);
+ kfree(descriptor);
+}
+
+static int ioctl_add_descriptor(struct client *client, void *buffer)
+{
+ struct fw_cdev_add_descriptor *request = buffer;
+ struct descriptor *descriptor;
+ int retval;
+
+ if (request->length > 256)
+ return -EINVAL;
+
+ descriptor =
+ kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
+ if (descriptor == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(descriptor->data,
+ u64_to_uptr(request->data), request->length * 4)) {
+ kfree(descriptor);
+ return -EFAULT;
+ }
+
+ descriptor->d.length = request->length;
+ descriptor->d.immediate = request->immediate;
+ descriptor->d.key = request->key;
+ descriptor->d.data = descriptor->data;
+
+ retval = fw_core_add_descriptor(&descriptor->d);
+ if (retval < 0) {
+ kfree(descriptor);
+ return retval;
+ }
+
+ descriptor->resource.release = release_descriptor;
+ add_client_resource(client, &descriptor->resource);
+ request->handle = descriptor->resource.handle;
+
+ return 0;
+}
+
+static int ioctl_remove_descriptor(struct client *client, void *buffer)
+{
+ struct fw_cdev_remove_descriptor *request = buffer;
+
+ return release_client_resource(client, request->handle, NULL);
+}
+
+static void
+iso_callback(struct fw_iso_context *context, u32 cycle,
+ size_t header_length, void *header, void *data)
+{
+ struct client *client = data;
+ struct iso_interrupt *interrupt;
+
+ interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC);
+ if (interrupt == NULL)
+ return;
+
+ interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
+ interrupt->interrupt.closure = client->iso_closure;
+ interrupt->interrupt.cycle = cycle;
+ interrupt->interrupt.header_length = header_length;
+ memcpy(interrupt->interrupt.header, header, header_length);
+ queue_event(client, &interrupt->event,
+ &interrupt->interrupt,
+ sizeof(interrupt->interrupt) + header_length, NULL, 0);
+}
+
+static int ioctl_create_iso_context(struct client *client, void *buffer)
+{
+ struct fw_cdev_create_iso_context *request = buffer;
+ struct fw_iso_context *context;
+
+ if (request->channel > 63)
+ return -EINVAL;
+
+ switch (request->type) {
+ case FW_ISO_CONTEXT_RECEIVE:
+ if (request->header_size < 4 || (request->header_size & 3))
+ return -EINVAL;
+
+ break;
+
+ case FW_ISO_CONTEXT_TRANSMIT:
+ if (request->speed > SCODE_3200)
+ return -EINVAL;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ client->iso_closure = request->closure;
+ context = fw_iso_context_create(client->device->card,
+ request->type,
+ request->channel,
+ request->speed,
+ request->header_size,
+ iso_callback, client);
+ if (IS_ERR(context))
+ return PTR_ERR(context);
+
+ /*
+ * Don't store an ERR_PTR in client->iso_context;
+ * fw_device_op_release() takes any non-NULL value there as a
+ * live context to destroy.
+ */
+ client->iso_context = context;
+
+ /* We only support one context at this time. */
+ request->handle = 0;
+
+ return 0;
+}
+
+static int ioctl_queue_iso(struct client *client, void *buffer)
+{
+ struct fw_cdev_queue_iso *request = buffer;
+ struct fw_cdev_iso_packet __user *p, *end, *next;
+ struct fw_iso_context *ctx = client->iso_context;
+ unsigned long payload, buffer_end, header_length;
+ int count;
+ struct {
+ struct fw_iso_packet packet;
+ u8 header[256];
+ } u;
+
+ if (ctx == NULL || request->handle != 0)
+ return -EINVAL;
+
+ /*
+ * If the user passes a non-NULL data pointer, has mmap()'ed
+ * the iso buffer, and the pointer points inside the buffer,
+ * we set up the payload pointers accordingly. Otherwise we
+ * set them both to 0, which will still let packets with
+ * payload_length == 0 through. In other words, if no packets
+ * use the indirect payload, the iso buffer need not be mapped
+ * and the request->data pointer is ignored.
+ */
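+ /*
+ * For example (hypothetical numbers, assuming 4 KiB pages): with
+ * a 16-page mapping, request->data == vm_start + 0x1000 gives
+ * payload == 0x1000 and buffer_end == 0x10000, so packet
+ * payloads are taken from the buffer starting one page in.
+ */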
+
+ payload = (unsigned long)request->data - client->vm_start;
+ buffer_end = client->buffer.page_count << PAGE_SHIFT;
+ if (request->data == 0 || client->buffer.pages == NULL ||
+ payload >= buffer_end) {
+ payload = 0;
+ buffer_end = 0;
+ }
+
+ if (!access_ok(VERIFY_READ, request->packets, request->size))
+ return -EFAULT;
+
+ p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
+ end = (void __user *)p + request->size;
+ count = 0;
+ while (p < end) {
+ if (__copy_from_user(&u.packet, p, sizeof(*p)))
+ return -EFAULT;
+
+ if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+ header_length = u.packet.header_length;
+ } else {
+ /*
+ * We require that header_length is a multiple of
+ * the fixed header size, ctx->header_size.
+ */
+ if (ctx->header_size == 0) {
+ if (u.packet.header_length > 0)
+ return -EINVAL;
+ } else if (u.packet.header_length % ctx->header_size != 0) {
+ return -EINVAL;
+ }
+ header_length = 0;
+ }
+
+ next = (struct fw_cdev_iso_packet __user *)
+ &p->header[header_length / 4];
+ if (next > end)
+ return -EINVAL;
+ if (__copy_from_user
+ (u.packet.header, p->header, header_length))
+ return -EFAULT;
+ if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
+ u.packet.header_length + u.packet.payload_length > 0)
+ return -EINVAL;
+ if (payload + u.packet.payload_length > buffer_end)
+ return -EINVAL;
+
+ if (fw_iso_context_queue(ctx, &u.packet,
+ &client->buffer, payload))
+ break;
+
+ p = next;
+ payload += u.packet.payload_length;
+ count++;
+ }
+
+ request->size -= uptr_to_u64(p) - request->packets;
+ request->packets = uptr_to_u64(p);
+ request->data = client->vm_start + payload;
+
+ return count;
+}
+
+static int ioctl_start_iso(struct client *client, void *buffer)
+{
+ struct fw_cdev_start_iso *request = buffer;
+
+ if (client->iso_context == NULL || request->handle != 0)
+ return -EINVAL;
+ if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
+ if (request->tags == 0 || request->tags > 15)
+ return -EINVAL;
+
+ if (request->sync > 15)
+ return -EINVAL;
+ }
+
+ return fw_iso_context_start(client->iso_context, request->cycle,
+ request->sync, request->tags);
+}
+
+static int ioctl_stop_iso(struct client *client, void *buffer)
+{
+ struct fw_cdev_stop_iso *request = buffer;
+
+ if (client->iso_context == NULL || request->handle != 0)
+ return -EINVAL;
+
+ return fw_iso_context_stop(client->iso_context);
+}
+
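+/*
+ * The handlers below are dispatched by _IOC_NR(cmd), so their order
+ * must match the _IOC_NR numbering of the FW_CDEV_IOC_* commands
+ * defined in linux/firewire-cdev.h.
+ */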
+static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+ ioctl_get_info,
+ ioctl_send_request,
+ ioctl_allocate,
+ ioctl_deallocate,
+ ioctl_send_response,
+ ioctl_initiate_bus_reset,
+ ioctl_add_descriptor,
+ ioctl_remove_descriptor,
+ ioctl_create_iso_context,
+ ioctl_queue_iso,
+ ioctl_start_iso,
+ ioctl_stop_iso,
+};
+
+static int
+dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
+{
+ char buffer[256];
+ int retval;
+
+ if (_IOC_TYPE(cmd) != '#' ||
+ _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+ return -EINVAL;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+ copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+ if (retval < 0)
+ return retval;
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+ copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long
+fw_device_op_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct client *client = file->private_data;
+
+ return dispatch_ioctl(client, cmd, (void __user *) arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long
+fw_device_op_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct client *client = file->private_data;
+
+ return dispatch_ioctl(client, cmd, compat_ptr(arg));
+}
+#endif
+
+static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct client *client = file->private_data;
+ enum dma_data_direction direction;
+ unsigned long size;
+ int page_count, retval;
+
+ /* FIXME: We could support multiple buffers, but we don't. */
+ if (client->buffer.pages != NULL)
+ return -EBUSY;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (vma->vm_start & ~PAGE_MASK)
+ return -EINVAL;
+
+ client->vm_start = vma->vm_start;
+ size = vma->vm_end - vma->vm_start;
+ page_count = size >> PAGE_SHIFT;
+ if (size & ~PAGE_MASK)
+ return -EINVAL;
+
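+ /*
+ * A writable mapping is an iso transmit buffer (userspace
+ * fills it, the device reads it, hence DMA_TO_DEVICE); a
+ * read-only mapping is for receive.
+ */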
+ if (vma->vm_flags & VM_WRITE)
+ direction = DMA_TO_DEVICE;
+ else
+ direction = DMA_FROM_DEVICE;
+
+ retval = fw_iso_buffer_init(&client->buffer, client->device->card,
+ page_count, direction);
+ if (retval < 0)
+ return retval;
+
+ retval = fw_iso_buffer_map(&client->buffer, vma);
+ if (retval < 0)
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+ return retval;
+}
+
+static int fw_device_op_release(struct inode *inode, struct file *file)
+{
+ struct client *client = file->private_data;
+ struct event *e, *next_e;
+ struct client_resource *r, *next_r;
+ unsigned long flags;
+
+ if (client->buffer.pages)
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+ if (client->iso_context)
+ fw_iso_context_destroy(client->iso_context);
+
+ list_for_each_entry_safe(r, next_r, &client->resource_list, link)
+ r->release(client, r);
+
+ /*
+ * FIXME: We should wait for the async tasklets to stop
+ * running before freeing the memory.
+ */
+
+ list_for_each_entry_safe(e, next_e, &client->event_list, link)
+ kfree(e);
+
+ spin_lock_irqsave(&client->device->card->lock, flags);
+ list_del(&client->link);
+ spin_unlock_irqrestore(&client->device->card->lock, flags);
+
+ fw_device_put(client->device);
+ kfree(client);
+
+ return 0;
+}
+
+static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
+{
+ struct client *client = file->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(file, &client->wait, pt);
+
+ if (fw_device_is_shutdown(client->device))
+ mask |= POLLHUP | POLLERR;
+ if (!list_empty(&client->event_list))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+const struct file_operations fw_device_ops = {
+ .owner = THIS_MODULE,
+ .open = fw_device_op_open,
+ .read = fw_device_op_read,
+ .unlocked_ioctl = fw_device_op_ioctl,
+ .poll = fw_device_op_poll,
+ .release = fw_device_op_release,
+ .mmap = fw_device_op_mmap,
+
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = fw_device_op_compat_ioctl,
+#endif
+};
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
new file mode 100644
index 00000000000..c1ce465d971
--- /dev/null
+++ b/drivers/firewire/fw-device.c
@@ -0,0 +1,813 @@
+/*
+ * Device probing and sysfs code.
+ *
+ * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/rwsem.h>
+#include <asm/semaphore.h>
+#include <linux/ctype.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p)
+{
+ ci->p = p + 1;
+ ci->end = ci->p + (p[0] >> 16);
+}
+EXPORT_SYMBOL(fw_csr_iterator_init);
+
+int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
+{
+ *key = *ci->p >> 24;
+ *value = *ci->p & 0xffffff;
+
+ return ci->p++ < ci->end;
+}
+EXPORT_SYMBOL(fw_csr_iterator_next);
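+
+/*
+ * Typical usage of the iterator (see match_unit_directory() below):
+ *
+ *     fw_csr_iterator_init(&ci, directory);
+ *     while (fw_csr_iterator_next(&ci, &key, &value))
+ *             ...handle each key/value pair...
+ */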
+
+static int is_fw_unit(struct device *dev);
+
+static int match_unit_directory(u32 *directory, const struct fw_device_id *id)
+{
+ struct fw_csr_iterator ci;
+ int key, value, match;
+
+ match = 0;
+ fw_csr_iterator_init(&ci, directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (key == CSR_VENDOR && value == id->vendor)
+ match |= FW_MATCH_VENDOR;
+ if (key == CSR_MODEL && value == id->model)
+ match |= FW_MATCH_MODEL;
+ if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
+ match |= FW_MATCH_SPECIFIER_ID;
+ if (key == CSR_VERSION && value == id->version)
+ match |= FW_MATCH_VERSION;
+ }
+
+ return (match & id->match_flags) == id->match_flags;
+}
+
+static int fw_unit_match(struct device *dev, struct device_driver *drv)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ struct fw_driver *driver = fw_driver(drv);
+ int i;
+
+ /* We only allow binding to fw_units. */
+ if (!is_fw_unit(dev))
+ return 0;
+
+ for (i = 0; driver->id_table[i].match_flags != 0; i++) {
+ if (match_unit_directory(unit->directory, &driver->id_table[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
+{
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct fw_csr_iterator ci;
+
+ int key, value;
+ int vendor = 0;
+ int model = 0;
+ int specifier_id = 0;
+ int version = 0;
+
+ fw_csr_iterator_init(&ci, &device->config_rom[5]);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ switch (key) {
+ case CSR_VENDOR:
+ vendor = value;
+ break;
+ case CSR_MODEL:
+ model = value;
+ break;
+ }
+ }
+
+ fw_csr_iterator_init(&ci, unit->directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ switch (key) {
+ case CSR_SPECIFIER_ID:
+ specifier_id = value;
+ break;
+ case CSR_VERSION:
+ version = value;
+ break;
+ }
+ }
+
+ return snprintf(buffer, buffer_size,
+ "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
+ vendor, model, specifier_id, version);
+}
+
+static int
+fw_unit_uevent(struct device *dev, char **envp, int num_envp,
+ char *buffer, int buffer_size)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ char modalias[64];
+ int length = 0;
+ int i = 0;
+
+ get_modalias(unit, modalias, sizeof(modalias));
+
+ if (add_uevent_var(envp, num_envp, &i,
+ buffer, buffer_size, &length,
+ "MODALIAS=%s", modalias))
+ return -ENOMEM;
+
+ envp[i] = NULL;
+
+ return 0;
+}
+
+struct bus_type fw_bus_type = {
+ .name = "firewire",
+ .match = fw_unit_match,
+};
+EXPORT_SYMBOL(fw_bus_type);
+
+struct fw_device *fw_device_get(struct fw_device *device)
+{
+ get_device(&device->device);
+
+ return device;
+}
+
+void fw_device_put(struct fw_device *device)
+{
+ put_device(&device->device);
+}
+
+static void fw_device_release(struct device *dev)
+{
+ struct fw_device *device = fw_device(dev);
+ unsigned long flags;
+
+ /*
+ * Take the card lock so we don't set this to NULL while a
+ * FW_NODE_UPDATED callback is being handled.
+ */
+ spin_lock_irqsave(&device->card->lock, flags);
+ device->node->data = NULL;
+ spin_unlock_irqrestore(&device->card->lock, flags);
+
+ fw_node_put(device->node);
+ fw_card_put(device->card);
+ kfree(device->config_rom);
+ kfree(device);
+}
+
+int fw_device_enable_phys_dma(struct fw_device *device)
+{
+ return device->card->driver->enable_phys_dma(device->card,
+ device->node_id,
+ device->generation);
+}
+EXPORT_SYMBOL(fw_device_enable_phys_dma);
+
+struct config_rom_attribute {
+ struct device_attribute attr;
+ u32 key;
+};
+
+static ssize_t
+show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
+{
+ struct config_rom_attribute *attr =
+ container_of(dattr, struct config_rom_attribute, attr);
+ struct fw_csr_iterator ci;
+ u32 *dir;
+ int key, value;
+
+ if (is_fw_unit(dev))
+ dir = fw_unit(dev)->directory;
+ else
+ dir = fw_device(dev)->config_rom + 5;
+
+ fw_csr_iterator_init(&ci, dir);
+ while (fw_csr_iterator_next(&ci, &key, &value))
+ if (attr->key == key)
+ return snprintf(buf, buf ? PAGE_SIZE : 0,
+ "0x%06x\n", value);
+
+ return -ENOENT;
+}
+
+#define IMMEDIATE_ATTR(name, key) \
+ { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
+
+static ssize_t
+show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
+{
+ struct config_rom_attribute *attr =
+ container_of(dattr, struct config_rom_attribute, attr);
+ struct fw_csr_iterator ci;
+ u32 *dir, *block = NULL, *p, *end;
+ int length, key, value, last_key = 0;
+ char *b;
+
+ if (is_fw_unit(dev))
+ dir = fw_unit(dev)->directory;
+ else
+ dir = fw_device(dev)->config_rom + 5;
+
+ fw_csr_iterator_init(&ci, dir);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (attr->key == last_key &&
+ key == (CSR_DESCRIPTOR | CSR_LEAF))
+ block = ci.p - 1 + value;
+ last_key = key;
+ }
+
+ if (block == NULL)
+ return -ENOENT;
+
+ length = min(block[0] >> 16, 256U);
+ if (length < 3)
+ return -ENOENT;
+
+ if (block[1] != 0 || block[2] != 0)
+ /* Unknown encoding. */
+ return -ENOENT;
+
+ if (buf == NULL)
+ return length * 4;
+
+ b = buf;
+ end = &block[length + 1];
+ for (p = &block[3]; p < end; p++, b += 4)
+ * (u32 *) b = (__force u32) __cpu_to_be32(*p);
+
+ /* Strip trailing whitespace and add newline. */
+ while (b--, (isspace(*b) || *b == '\0') && b > buf);
+ strcpy(b + 1, "\n");
+
+ return b + 2 - buf;
+}
+
+#define TEXT_LEAF_ATTR(name, key) \
+ { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
+
+static struct config_rom_attribute config_rom_attributes[] = {
+ IMMEDIATE_ATTR(vendor, CSR_VENDOR),
+ IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
+ IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
+ IMMEDIATE_ATTR(version, CSR_VERSION),
+ IMMEDIATE_ATTR(model, CSR_MODEL),
+ TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
+ TEXT_LEAF_ATTR(model_name, CSR_MODEL),
+ TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
+};
+
+static void
+init_fw_attribute_group(struct device *dev,
+ struct device_attribute *attrs,
+ struct fw_attribute_group *group)
+{
+ struct device_attribute *attr;
+ int i, j;
+
+ for (j = 0; attrs[j].attr.name != NULL; j++)
+ group->attrs[j] = &attrs[j].attr;
+
+ for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
+ attr = &config_rom_attributes[i].attr;
+ if (attr->show(dev, attr, NULL) < 0)
+ continue;
+ group->attrs[j++] = &attr->attr;
+ }
+
+ BUG_ON(j >= ARRAY_SIZE(group->attrs));
+ group->attrs[j++] = NULL;
+ group->groups[0] = &group->group;
+ group->groups[1] = NULL;
+ group->group.attrs = group->attrs;
+ dev->groups = group->groups;
+}
+
+static ssize_t
+modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ int length;
+
+ length = get_modalias(unit, buf, PAGE_SIZE);
+ strcpy(buf + length, "\n");
+
+ return length + 1;
+}
+
+static ssize_t
+rom_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_device *device = fw_device(dev->parent);
+ struct fw_unit *unit = fw_unit(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (int)(unit->directory - device->config_rom));
+}
+
+static struct device_attribute fw_unit_attributes[] = {
+ __ATTR_RO(modalias),
+ __ATTR_RO(rom_index),
+ __ATTR_NULL,
+};
+
+static ssize_t
+config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_device *device = fw_device(dev);
+
+ memcpy(buf, device->config_rom, device->config_rom_length * 4);
+
+ return device->config_rom_length * 4;
+}
+
+static ssize_t
+guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_device *device = fw_device(dev);
+ u64 guid;
+
+ guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)guid);
+}
+
+static struct device_attribute fw_device_attributes[] = {
+ __ATTR_RO(config_rom),
+ __ATTR_RO(guid),
+ __ATTR_NULL,
+};
+
+struct read_quadlet_callback_data {
+ struct completion done;
+ int rcode;
+ u32 data;
+};
+
+static void
+complete_transaction(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct read_quadlet_callback_data *callback_data = data;
+
+ if (rcode == RCODE_COMPLETE)
+ callback_data->data = be32_to_cpu(*(__be32 *)payload);
+ callback_data->rcode = rcode;
+ complete(&callback_data->done);
+}
+
+static int read_rom(struct fw_device *device, int index, u32 *data)
+{
+ struct read_quadlet_callback_data callback_data;
+ struct fw_transaction t;
+ u64 offset;
+
+ init_completion(&callback_data.done);
+
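+ /*
+ * The config rom starts at offset 0x400 of the IEEE 1212
+ * initial register space, i.e. at 0xfffff0000400 in 1394
+ * address space; index is a quadlet index into the rom.
+ */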
+ offset = 0xfffff0000400ULL + index * 4;
+ fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
+ device->node_id,
+ device->generation, SCODE_100,
+ offset, NULL, 4, complete_transaction, &callback_data);
+
+ wait_for_completion(&callback_data.done);
+
+ *data = callback_data.data;
+
+ return callback_data.rcode;
+}
+
+static int read_bus_info_block(struct fw_device *device)
+{
+ static u32 rom[256];
+ u32 stack[16], sp, key;
+ int i, end, length;
+
+ /* First read the bus info block. */
+ for (i = 0; i < 5; i++) {
+ if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
+ return -1;
+ /*
+ * As per IEEE1212 7.2, during power-up, devices can
+ * reply with a 0 for the first quadlet of the config
+ * rom to indicate that they are booting (for example,
+ * if the firmware is on the disk of an external
+ * hard disk). In that case we just fail, and the
+ * retry mechanism will try again later.
+ */
+ if (i == 0 && rom[i] == 0)
+ return -1;
+ }
+
+ /*
+ * Now parse the config rom. The config rom is a recursive
+ * directory structure so we parse it using a stack of
+ * references to the blocks that make up the structure. We
+ * push a reference to the root directory on the stack to
+ * start things off.
+ */
+ length = i;
+ sp = 0;
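+ /*
+ * 0xc0000005: type 0xc0 (directory) in the upper byte, offset 5
+ * (the root directory follows the 5-quadlet bus info block).
+ */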
+ stack[sp++] = 0xc0000005;
+ while (sp > 0) {
+ /*
+ * Pop the next block reference off the stack. The
+ * lower 24 bits are the offset into the config rom,
+ * the upper 8 bits are the type of the reference to
+ * the block.
+ */
+ key = stack[--sp];
+ i = key & 0xffffff;
+ if (i >= ARRAY_SIZE(rom))
+ /*
+ * The reference points outside the standard
+ * config rom area, something's fishy.
+ */
+ return -1;
+
+ /* Read header quadlet for the block to get the length. */
+ if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
+ return -1;
+ end = i + (rom[i] >> 16) + 1;
+ i++;
+ if (end > ARRAY_SIZE(rom))
+ /*
+ * This block extends outside the standard config
+ * rom area (and the array we're reading it
+ * into). That's broken, so ignore this
+ * device.
+ */
+ return -1;
+
+ /*
+ * Now read in the block. If this is a directory
+ * block, check the entries as we read them to see if
+ * it references another block, and push it in that case.
+ */
+ while (i < end) {
+ if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
+ return -1;
+ if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
+ sp < ARRAY_SIZE(stack))
+ stack[sp++] = i + rom[i];
+ i++;
+ }
+ if (length < i)
+ length = i;
+ }
+
+ device->config_rom = kmalloc(length * 4, GFP_KERNEL);
+ if (device->config_rom == NULL)
+ return -1;
+ memcpy(device->config_rom, rom, length * 4);
+ device->config_rom_length = length;
+
+ return 0;
+}
+
+static void fw_unit_release(struct device *dev)
+{
+ struct fw_unit *unit = fw_unit(dev);
+
+ kfree(unit);
+}
+
+static struct device_type fw_unit_type = {
+ .uevent = fw_unit_uevent,
+ .release = fw_unit_release,
+};
+
+static int is_fw_unit(struct device *dev)
+{
+ return dev->type == &fw_unit_type;
+}
+
+static void create_units(struct fw_device *device)
+{
+ struct fw_csr_iterator ci;
+ struct fw_unit *unit;
+ int key, value, i;
+
+ i = 0;
+ fw_csr_iterator_init(&ci, &device->config_rom[5]);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (key != (CSR_UNIT | CSR_DIRECTORY))
+ continue;
+
+ /*
+ * Get the address of the unit directory and try to
+ * match the drivers id_tables against it.
+ */
+ unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+ if (unit == NULL) {
+ fw_error("failed to allocate memory for unit\n");
+ continue;
+ }
+
+ unit->directory = ci.p + value - 1;
+ unit->device.bus = &fw_bus_type;
+ unit->device.type = &fw_unit_type;
+ unit->device.parent = &device->device;
+ snprintf(unit->device.bus_id, sizeof(unit->device.bus_id),
+ "%s.%d", device->device.bus_id, i++);
+
+ init_fw_attribute_group(&unit->device,
+ fw_unit_attributes,
+ &unit->attribute_group);
+ if (device_register(&unit->device) < 0)
+ goto skip_unit;
+
+ continue;
+
+ skip_unit:
+ kfree(unit);
+ }
+}
+
+static int shutdown_unit(struct device *device, void *data)
+{
+ device_unregister(device);
+
+ return 0;
+}
+
+static DECLARE_RWSEM(idr_rwsem);
+static DEFINE_IDR(fw_device_idr);
+int fw_cdev_major;
+
+struct fw_device *fw_device_from_devt(dev_t devt)
+{
+ struct fw_device *device;
+
+ down_read(&idr_rwsem);
+ device = idr_find(&fw_device_idr, MINOR(devt));
+ up_read(&idr_rwsem);
+
+ return device;
+}
+
+static void fw_device_shutdown(struct work_struct *work)
+{
+ struct fw_device *device =
+ container_of(work, struct fw_device, work.work);
+ int minor = MINOR(device->device.devt);
+
+ down_write(&idr_rwsem);
+ idr_remove(&fw_device_idr, minor);
+ up_write(&idr_rwsem);
+
+ fw_device_cdev_remove(device);
+ device_for_each_child(&device->device, NULL, shutdown_unit);
+ device_unregister(&device->device);
+}
+
+static struct device_type fw_device_type = {
+ .release = fw_device_release,
+};
+
+/*
+ * These defines control the retry behavior for reading the config
+ * rom. It shouldn't be necessary to tweak these; if the device
+ * doesn't respond to a config rom read within the retry window below
+ * (MAX_RETRIES reads spaced RETRY_DELAY apart, roughly 30 seconds),
+ * it's not going to respond at all. As for the initial delay, a lot
+ * of devices will be able to respond within half a second after bus
+ * reset. On the other hand, it's not really worth being more
+ * aggressive than that, since it scales pretty well; if 10 devices
+ * are plugged in, they're all getting read within one second.
+ */
+
+#define MAX_RETRIES 10
+#define RETRY_DELAY (3 * HZ)
+#define INITIAL_DELAY (HZ / 2)
+
+static void fw_device_init(struct work_struct *work)
+{
+ struct fw_device *device =
+ container_of(work, struct fw_device, work.work);
+ int minor, err;
+
+ /*
+ * All failure paths here set node->data to NULL, so that we
+ * don't try to do device_for_each_child() on a kfree()'d
+ * device.
+ */
+
+ if (read_bus_info_block(device) < 0) {
+ if (device->config_rom_retries < MAX_RETRIES) {
+ device->config_rom_retries++;
+ schedule_delayed_work(&device->work, RETRY_DELAY);
+ } else {
+ fw_notify("giving up on config rom for node id %x\n",
+ device->node_id);
+ if (device->node == device->card->root_node)
+ schedule_delayed_work(&device->card->work, 0);
+ fw_device_release(&device->device);
+ }
+ return;
+ }
+
+ err = -ENOMEM;
+ down_write(&idr_rwsem);
+ if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
+ err = idr_get_new(&fw_device_idr, device, &minor);
+ up_write(&idr_rwsem);
+ if (err < 0)
+ goto error;
+
+ device->device.bus = &fw_bus_type;
+ device->device.type = &fw_device_type;
+ device->device.parent = device->card->device;
+ device->device.devt = MKDEV(fw_cdev_major, minor);
+ snprintf(device->device.bus_id, sizeof(device->device.bus_id),
+ "fw%d", minor);
+
+ init_fw_attribute_group(&device->device,
+ fw_device_attributes,
+ &device->attribute_group);
+ if (device_add(&device->device)) {
+ fw_error("Failed to add device.\n");
+ goto error_with_cdev;
+ }
+
+ create_units(device);
+
+ /*
+ * Transition the device to running state. If it got pulled
+ * out from under us while we did the initialization work, we
+ * have to shut down the device again here. Normally, though,
+ * fw_node_event will be responsible for shutting it down when
+ * necessary. We have to use the atomic cmpxchg here to avoid
+ * racing with the FW_NODE_DESTROYED case in
+ * fw_node_event().
+ */
+ if (atomic_cmpxchg(&device->state,
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
+ fw_device_shutdown(&device->work.work);
+ else
+ fw_notify("created new fw device %s (%d config rom retries)\n",
+ device->device.bus_id, device->config_rom_retries);
+
+ /*
+ * Reschedule the IRM work if we just finished reading the
+ * root node config rom. If this races with a bus reset we
+ * just end up running the IRM work a couple of extra times -
+ * pretty harmless.
+ */
+ if (device->node == device->card->root_node)
+ schedule_delayed_work(&device->card->work, 0);
+
+ return;
+
+ error_with_cdev:
+ down_write(&idr_rwsem);
+ idr_remove(&fw_device_idr, minor);
+ up_write(&idr_rwsem);
+ error:
+ put_device(&device->device);
+}
+
+static int update_unit(struct device *dev, void *data)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ struct fw_driver *driver = (struct fw_driver *)dev->driver;
+
+ if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
+ down(&dev->sem);
+ driver->update(unit);
+ up(&dev->sem);
+ }
+
+ return 0;
+}
+
+static void fw_device_update(struct work_struct *work)
+{
+ struct fw_device *device =
+ container_of(work, struct fw_device, work.work);
+
+ fw_device_cdev_update(device);
+ device_for_each_child(&device->device, NULL, update_unit);
+}
+
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+{
+ struct fw_device *device;
+
+ switch (event) {
+ case FW_NODE_CREATED:
+ case FW_NODE_LINK_ON:
+ if (!node->link_on)
+ break;
+
+ device = kzalloc(sizeof(*device), GFP_ATOMIC);
+ if (device == NULL)
+ break;
+
+ /*
+ * Do minimal initialization of the device here; the
+ * rest will happen in fw_device_init(). We need the
+ * card and node so we can read the config rom, and we
+ * need to do device_initialize() now so that
+ * device_for_each_child() in FW_NODE_UPDATED
+ * doesn't freak out.
+ */
+ device_initialize(&device->device);
+ atomic_set(&device->state, FW_DEVICE_INITIALIZING);
+ device->card = fw_card_get(card);
+ device->node = fw_node_get(node);
+ device->node_id = node->node_id;
+ device->generation = card->generation;
+ INIT_LIST_HEAD(&device->client_list);
+
+ /*
+ * Set the node data to point back to this device so
+ * FW_NODE_UPDATED callbacks can update the node_id
+ * and generation for the device.
+ */
+ node->data = device;
+
+ /*
+ * Many devices are slow to respond after bus resets,
+ * especially if they are bus powered and go through
+ * power-up after getting plugged in. We schedule the
+ * first config rom scan half a second after bus reset.
+ */
+ INIT_DELAYED_WORK(&device->work, fw_device_init);
+ schedule_delayed_work(&device->work, INITIAL_DELAY);
+ break;
+
+ case FW_NODE_UPDATED:
+ if (!node->link_on || node->data == NULL)
+ break;
+
+ device = node->data;
+ device->node_id = node->node_id;
+ device->generation = card->generation;
+ if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
+ PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+ schedule_delayed_work(&device->work, 0);
+ }
+ break;
+
+ case FW_NODE_DESTROYED:
+ case FW_NODE_LINK_OFF:
+ if (!node->data)
+ break;
+
+ /*
+ * Destroy the device associated with the node. There
+ * are two cases here: either the device is fully
+ * initialized (FW_DEVICE_RUNNING) or we're in the
+ * process of reading its config rom
+ * (FW_DEVICE_INITIALIZING). If it is fully
+ * initialized we can reuse device->work to schedule a
+ * full fw_device_shutdown(). If not, there's work
+ * scheduled to read its config rom, and we just put
+ * the device in shutdown state to have that code fail
+ * to create the device.
+ */
+ device = node->data;
+ if (atomic_xchg(&device->state,
+ FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
+ PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ schedule_delayed_work(&device->work, 0);
+ }
+ break;
+ }
+}
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
new file mode 100644
index 00000000000..0ba9d64ccf4
--- /dev/null
+++ b/drivers/firewire/fw-device.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __fw_device_h
+#define __fw_device_h
+
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <asm/atomic.h>
+
+enum fw_device_state {
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING,
+ FW_DEVICE_SHUTDOWN,
+};
+
+struct fw_attribute_group {
+ struct attribute_group *groups[2];
+ struct attribute_group group;
+ struct attribute *attrs[11];
+};
+
+struct fw_device {
+ atomic_t state;
+ struct fw_node *node;
+ int node_id;
+ int generation;
+ struct fw_card *card;
+ struct device device;
+ struct list_head link;
+ struct list_head client_list;
+ u32 *config_rom;
+ size_t config_rom_length;
+ int config_rom_retries;
+ struct delayed_work work;
+ struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_device *
+fw_device(struct device *dev)
+{
+ return container_of(dev, struct fw_device, device);
+}
+
+static inline int
+fw_device_is_shutdown(struct fw_device *device)
+{
+ return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
+}
+
+struct fw_device *fw_device_get(struct fw_device *device);
+void fw_device_put(struct fw_device *device);
+int fw_device_enable_phys_dma(struct fw_device *device);
+
+void fw_device_cdev_update(struct fw_device *device);
+void fw_device_cdev_remove(struct fw_device *device);
+
+struct fw_device *fw_device_from_devt(dev_t devt);
+extern int fw_cdev_major;
+
+struct fw_unit {
+ struct device device;
+ u32 *directory;
+ struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_unit *
+fw_unit(struct device *dev)
+{
+ return container_of(dev, struct fw_unit, device);
+}
+
+#define CSR_OFFSET 0x40
+#define CSR_LEAF 0x80
+#define CSR_DIRECTORY 0xc0
+
+#define CSR_DESCRIPTOR 0x01
+#define CSR_VENDOR 0x03
+#define CSR_HARDWARE_VERSION 0x04
+#define CSR_NODE_CAPABILITIES 0x0c
+#define CSR_UNIT 0x11
+#define CSR_SPECIFIER_ID 0x12
+#define CSR_VERSION 0x13
+#define CSR_DEPENDENT_INFO 0x14
+#define CSR_MODEL 0x17
+#define CSR_INSTANCE 0x18
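+
+/*
+ * Key types combine with key values, e.g. a unit directory entry in
+ * the root directory has key (CSR_UNIT | CSR_DIRECTORY) == 0xd1,
+ * which is what create_units() in fw-device.c scans for.
+ */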
+
+#define SBP2_COMMAND_SET_SPECIFIER 0x38
+#define SBP2_COMMAND_SET 0x39
+#define SBP2_COMMAND_SET_REVISION 0x3b
+#define SBP2_FIRMWARE_REVISION 0x3c
+
+struct fw_csr_iterator {
+ u32 *p;
+ u32 *end;
+};
+
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
+int fw_csr_iterator_next(struct fw_csr_iterator *ci,
+ int *key, int *value);
+
+#define FW_MATCH_VENDOR 0x0001
+#define FW_MATCH_MODEL 0x0002
+#define FW_MATCH_SPECIFIER_ID 0x0004
+#define FW_MATCH_VERSION 0x0008
+
+struct fw_device_id {
+ u32 match_flags;
+ u32 vendor;
+ u32 model;
+ u32 specifier_id;
+ u32 version;
+ void *driver_data;
+};
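+
+/*
+ * A driver announces the units it binds to with an id table; as a
+ * sketch (hypothetical driver, using the well-known SBP-2 values), a
+ * storage driver would match on specifier id and version:
+ *
+ *     static const struct fw_device_id sbp2_id_table[] = {
+ *             {
+ *                     .match_flags  = FW_MATCH_SPECIFIER_ID |
+ *                                     FW_MATCH_VERSION,
+ *                     .specifier_id = 0x00609e,
+ *                     .version      = 0x010483,
+ *             },
+ *             { }
+ *     };
+ */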
+
+struct fw_driver {
+ struct device_driver driver;
+ /* Called when the parent device sits through a bus reset. */
+ void (*update) (struct fw_unit *unit);
+ const struct fw_device_id *id_table;
+};
+
+static inline struct fw_driver *
+fw_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct fw_driver, driver);
+}
+
+extern const struct file_operations fw_device_ops;
+
+#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
new file mode 100644
index 00000000000..2b640e9be6d
--- /dev/null
+++ b/drivers/firewire/fw-iso.c
@@ -0,0 +1,163 @@
+/*
+ * Isochronous IO functionality
+ *
+ * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+int
+fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction)
+{
+ int i, j, retval = -ENOMEM;
+ dma_addr_t address;
+
+ buffer->page_count = page_count;
+ buffer->direction = direction;
+
+ buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
+ GFP_KERNEL);
+ if (buffer->pages == NULL)
+ goto out;
+
+ for (i = 0; i < buffer->page_count; i++) {
+ buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (buffer->pages[i] == NULL)
+ goto out_pages;
+
+ address = dma_map_page(card->device, buffer->pages[i],
+ 0, PAGE_SIZE, direction);
+ if (dma_mapping_error(address)) {
+ __free_page(buffer->pages[i]);
+ goto out_pages;
+ }
+ set_page_private(buffer->pages[i], address);
+ }
+
+ return 0;
+
+ out_pages:
+ for (j = 0; j < i; j++) {
+ address = page_private(buffer->pages[j]);
+ dma_unmap_page(card->device, address,
+ PAGE_SIZE, direction);
+ __free_page(buffer->pages[j]);
+ }
+ kfree(buffer->pages);
+ out:
+ buffer->pages = NULL;
+ return retval;
+}
+
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+{
+ unsigned long uaddr;
+ int i, retval;
+
+ uaddr = vma->vm_start;
+ for (i = 0; i < buffer->page_count; i++) {
+ retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
+ if (retval)
+ return retval;
+ uaddr += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
+ struct fw_card *card)
+{
+ int i;
+ dma_addr_t address;
+
+ for (i = 0; i < buffer->page_count; i++) {
+ address = page_private(buffer->pages[i]);
+ dma_unmap_page(card->device, address,
+ PAGE_SIZE, buffer->direction);
+ __free_page(buffer->pages[i]);
+ }
+
+ kfree(buffer->pages);
+ buffer->pages = NULL;
+}
+
+struct fw_iso_context *
+fw_iso_context_create(struct fw_card *card, int type,
+ int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data)
+{
+ struct fw_iso_context *ctx;
+
+ ctx = card->driver->allocate_iso_context(card, type, header_size);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ ctx->card = card;
+ ctx->type = type;
+ ctx->channel = channel;
+ ctx->speed = speed;
+ ctx->header_size = header_size;
+ ctx->callback = callback;
+ ctx->callback_data = callback_data;
+
+ return ctx;
+}
+EXPORT_SYMBOL(fw_iso_context_create);
+
+void fw_iso_context_destroy(struct fw_iso_context *ctx)
+{
+ struct fw_card *card = ctx->card;
+
+ card->driver->free_iso_context(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_destroy);
+
+int
+fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
+{
+ return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
+}
+EXPORT_SYMBOL(fw_iso_context_start);
+
+int
+fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
+{
+ struct fw_card *card = ctx->card;
+
+ return card->driver->queue_iso(ctx, packet, buffer, payload);
+}
+EXPORT_SYMBOL(fw_iso_context_queue);
+
+int
+fw_iso_context_stop(struct fw_iso_context *ctx)
+{
+ return ctx->card->driver->stop_iso(ctx);
+}
+EXPORT_SYMBOL(fw_iso_context_stop);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
new file mode 100644
index 00000000000..1f5c70461b8
--- /dev/null
+++ b/drivers/firewire/fw-ohci.c
@@ -0,0 +1,1943 @@
+/*
+ * Driver for OHCI 1394 controllers
+ *
+ * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include "fw-transaction.h"
+#include "fw-ohci.h"
+
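+/*
+ * Bits of the control field in an OHCI DMA program descriptor; see
+ * the OHCI 1394 specification for the authoritative layout.
+ */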
+#define DESCRIPTOR_OUTPUT_MORE 0
+#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
+#define DESCRIPTOR_INPUT_MORE (2 << 12)
+#define DESCRIPTOR_INPUT_LAST (3 << 12)
+#define DESCRIPTOR_STATUS (1 << 11)
+#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
+#define DESCRIPTOR_PING (1 << 7)
+#define DESCRIPTOR_YY (1 << 6)
+#define DESCRIPTOR_NO_IRQ (0 << 4)
+#define DESCRIPTOR_IRQ_ERROR (1 << 4)
+#define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
+#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
+#define DESCRIPTOR_WAIT (3 << 0)
+
+struct descriptor {
+ __le16 req_count;
+ __le16 control;
+ __le32 data_address;
+ __le32 branch_address;
+ __le16 res_count;
+ __le16 transfer_status;
+} __attribute__((aligned(16)));
+
+struct db_descriptor {
+ __le16 first_size;
+ __le16 control;
+ __le16 second_req_count;
+ __le16 first_req_count;
+ __le32 branch_address;
+ __le16 second_res_count;
+ __le16 first_res_count;
+ __le32 reserved0;
+ __le32 first_buffer;
+ __le32 second_buffer;
+ __le32 reserved1;
+} __attribute__((aligned(16)));
+
+#define CONTROL_SET(regs) (regs)
+#define CONTROL_CLEAR(regs) ((regs) + 4)
+#define COMMAND_PTR(regs) ((regs) + 12)
+#define CONTEXT_MATCH(regs) ((regs) + 16)
+
+struct ar_buffer {
+ struct descriptor descriptor;
+ struct ar_buffer *next;
+ __le32 data[0];
+};
+
+struct ar_context {
+ struct fw_ohci *ohci;
+ struct ar_buffer *current_buffer;
+ struct ar_buffer *last_buffer;
+ void *pointer;
+ u32 regs;
+ struct tasklet_struct tasklet;
+};
+
+struct context;
+
+typedef int (*descriptor_callback_t)(struct context *ctx,
+ struct descriptor *d,
+ struct descriptor *last);
+struct context {
+ struct fw_ohci *ohci;
+ u32 regs;
+
+ struct descriptor *buffer;
+ dma_addr_t buffer_bus;
+ size_t buffer_size;
+ struct descriptor *head_descriptor;
+ struct descriptor *tail_descriptor;
+ struct descriptor *tail_descriptor_last;
+ struct descriptor *prev_descriptor;
+
+ descriptor_callback_t callback;
+
+ struct tasklet_struct tasklet;
+};
+
+#define IT_HEADER_SY(v) ((v) << 0)
+#define IT_HEADER_TCODE(v) ((v) << 4)
+#define IT_HEADER_CHANNEL(v) ((v) << 8)
+#define IT_HEADER_TAG(v) ((v) << 14)
+#define IT_HEADER_SPEED(v) ((v) << 16)
+#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
+
+struct iso_context {
+ struct fw_iso_context base;
+ struct context context;
+ void *header;
+ size_t header_length;
+};
+
+#define CONFIG_ROM_SIZE 1024
+
+struct fw_ohci {
+ struct fw_card card;
+
+ u32 version;
+ char __iomem *registers;
+ dma_addr_t self_id_bus;
+ __le32 *self_id_cpu;
+ struct tasklet_struct bus_reset_tasklet;
+ int node_id;
+ int generation;
+ int request_generation;
+ u32 bus_seconds;
+
+ /*
+ * Spinlock for accessing fw_ohci data. Never call out of
+ * this driver with this lock held.
+ */
+ spinlock_t lock;
+ u32 self_id_buffer[512];
+
+ /* Config rom buffers */
+ __be32 *config_rom;
+ dma_addr_t config_rom_bus;
+ __be32 *next_config_rom;
+ dma_addr_t next_config_rom_bus;
+ u32 next_header;
+
+ struct ar_context ar_request_ctx;
+ struct ar_context ar_response_ctx;
+ struct context at_request_ctx;
+ struct context at_response_ctx;
+
+ u32 it_context_mask;
+ struct iso_context *it_context_list;
+ u32 ir_context_mask;
+ struct iso_context *ir_context_list;
+};
+
+static inline struct fw_ohci *fw_ohci(struct fw_card *card)
+{
+ return container_of(card, struct fw_ohci, card);
+}
+
+#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
+#define IR_CONTEXT_BUFFER_FILL 0x80000000
+#define IR_CONTEXT_ISOCH_HEADER 0x40000000
+#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
+#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
+#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
+
+#define CONTEXT_RUN 0x8000
+#define CONTEXT_WAKE 0x1000
+#define CONTEXT_DEAD 0x0800
+#define CONTEXT_ACTIVE 0x0400
+
+#define OHCI1394_MAX_AT_REQ_RETRIES 0x2
+#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
+#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
+
+#define FW_OHCI_MAJOR 240
+#define OHCI1394_REGISTER_SIZE 0x800
+#define OHCI_LOOP_COUNT 500
+#define OHCI1394_PCI_HCI_Control 0x40
+#define SELF_ID_BUF_SIZE 0x800
+#define OHCI_TCODE_PHY_PACKET 0x0e
+#define OHCI_VERSION_1_1 0x010010
+#define ISO_BUFFER_SIZE (64 * 1024)
+#define AT_BUFFER_SIZE 4096
+
+static char ohci_driver_name[] = KBUILD_MODNAME;
+
+static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
+{
+ writel(data, ohci->registers + offset);
+}
+
+static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
+{
+ return readl(ohci->registers + offset);
+}
+
+static inline void flush_writes(const struct fw_ohci *ohci)
+{
+ /* Do a dummy read to flush writes. */
+ reg_read(ohci, OHCI1394_Version);
+}
+
+static int
+ohci_update_phy_reg(struct fw_card *card, int addr,
+ int clear_bits, int set_bits)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ u32 val, old;
+
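+ /*
+ * PHY registers are reached indirectly through PhyControl: trigger
+ * a read, poll until ReadDone is set, then write back the modified
+ * value. This is a read-modify-write of the addressed PHY register.
+ */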
+ reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
+ msleep(2);
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
+ fw_error("failed to set phy reg bits.\n");
+ return -EBUSY;
+ }
+
+ old = OHCI1394_PhyControl_ReadData(val);
+ old = (old & ~clear_bits) | set_bits;
+ reg_write(ohci, OHCI1394_PhyControl,
+ OHCI1394_PhyControl_Write(addr, old));
+
+ return 0;
+}
+
+static int ar_context_add_page(struct ar_context *ctx)
+{
+ struct device *dev = ctx->ohci->card.device;
+ struct ar_buffer *ab;
+ dma_addr_t ab_bus;
+ size_t offset;
+
+ ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
+ if (ab == NULL)
+ return -ENOMEM;
+
+ ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(ab_bus)) {
+ free_page((unsigned long) ab);
+ return -ENOMEM;
+ }
+
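+ /*
+ * Each receive page starts with its own INPUT_MORE descriptor,
+ * followed by the data area. Linking the page in with a branch
+ * address of ab_bus | 1 (Z = 1) lets the controller chain from
+ * the previous buffer's descriptor to this one.
+ */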
+ memset(&ab->descriptor, 0, sizeof(ab->descriptor));
+ ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+ DESCRIPTOR_STATUS |
+ DESCRIPTOR_BRANCH_ALWAYS);
+ offset = offsetof(struct ar_buffer, data);
+ ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
+ ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
+ ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
+ ab->descriptor.branch_address = 0;
+
+ dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
+ ctx->last_buffer->next = ab;
+ ctx->last_buffer = ab;
+
+ reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+ flush_writes(ctx->ohci);
+
+ return 0;
+}
+
+static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
+{
+ struct fw_ohci *ohci = ctx->ohci;
+ struct fw_packet p;
+ u32 status, length, tcode;
+
+ p.header[0] = le32_to_cpu(buffer[0]);
+ p.header[1] = le32_to_cpu(buffer[1]);
+ p.header[2] = le32_to_cpu(buffer[2]);
+
+ tcode = (p.header[0] >> 4) & 0x0f;
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_READ_QUADLET_RESPONSE:
+ p.header[3] = (__force __u32) buffer[3];
+ p.header_length = 16;
+ p.payload_length = 0;
+ break;
+
+ case TCODE_READ_BLOCK_REQUEST:
+ p.header[3] = le32_to_cpu(buffer[3]);
+ p.header_length = 16;
+ p.payload_length = 0;
+ break;
+
+ case TCODE_WRITE_BLOCK_REQUEST:
+ case TCODE_READ_BLOCK_RESPONSE:
+ case TCODE_LOCK_REQUEST:
+ case TCODE_LOCK_RESPONSE:
+ p.header[3] = le32_to_cpu(buffer[3]);
+ p.header_length = 16;
+ p.payload_length = p.header[3] >> 16;
+ break;
+
+ case TCODE_WRITE_RESPONSE:
+ case TCODE_READ_QUADLET_REQUEST:
+ case OHCI_TCODE_PHY_PACKET:
+ p.header_length = 12;
+ p.payload_length = 0;
+ break;
+ }
+
+ p.payload = (void *) buffer + p.header_length;
+
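+ /*
+ * The controller appends a status quadlet to every received
+ * packet: the event/ack code lives in bits 16-20, the speed in
+ * bits 21-23 and the reception timestamp in the low 16 bits.
+ */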
+ /* FIXME: What to do about evt_* errors? */
+ length = (p.header_length + p.payload_length + 3) / 4;
+ status = le32_to_cpu(buffer[length]);
+
+ p.ack = ((status >> 16) & 0x1f) - 16;
+ p.speed = (status >> 21) & 0x7;
+ p.timestamp = status & 0xffff;
+ p.generation = ohci->request_generation;
+
+ /*
+ * The OHCI bus reset handler synthesizes a phy packet with
+ * the new generation number when a bus reset happens (see
+ * section 8.4.2.3). This helps us determine when a request
+ * was received and make sure we send the response in the same
+ * generation. We only need this for requests; for responses
+ * we use the unique tlabel for finding the matching
+ * request.
+ */
+
+ if (p.ack + 16 == 0x09)
+ ohci->request_generation = (buffer[2] >> 16) & 0xff;
+ else if (ctx == &ohci->ar_request_ctx)
+ fw_core_handle_request(&ohci->card, &p);
+ else
+ fw_core_handle_response(&ohci->card, &p);
+
+ return buffer + length + 1;
+}
+
+static void ar_context_tasklet(unsigned long data)
+{
+ struct ar_context *ctx = (struct ar_context *)data;
+ struct fw_ohci *ohci = ctx->ohci;
+ struct ar_buffer *ab;
+ struct descriptor *d;
+ void *buffer, *end;
+
+ ab = ctx->current_buffer;
+ d = &ab->descriptor;
+
+ if (d->res_count == 0) {
+ size_t size, rest, offset;
+
+ /*
+ * This descriptor is finished and we may have a
+ * packet split across this and the next buffer. We
+ * reuse the page for reassembling the split packet.
+ */
+
+ offset = offsetof(struct ar_buffer, data);
+ dma_unmap_single(ohci->card.device,
+ ab->descriptor.data_address - offset,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ buffer = ab;
+ ab = ab->next;
+ d = &ab->descriptor;
+ size = buffer + PAGE_SIZE - ctx->pointer;
+ rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+ memmove(buffer, ctx->pointer, size);
+ memcpy(buffer + size, ab->data, rest);
+ ctx->current_buffer = ab;
+ ctx->pointer = (void *) ab->data + rest;
+ end = buffer + size + rest;
+
+ while (buffer < end)
+ buffer = handle_ar_packet(ctx, buffer);
+
+ free_page((unsigned long)buffer);
+ ar_context_add_page(ctx);
+ } else {
+ buffer = ctx->pointer;
+ ctx->pointer = end =
+ (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
+
+ while (buffer < end)
+ buffer = handle_ar_packet(ctx, buffer);
+ }
+}
+
+static int
+ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
+{
+ struct ar_buffer ab;
+
+ ctx->regs = regs;
+ ctx->ohci = ohci;
+ ctx->last_buffer = &ab;
+ tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+
+ ar_context_add_page(ctx);
+ ar_context_add_page(ctx);
+ ctx->current_buffer = ab.next;
+ ctx->pointer = ctx->current_buffer->data;
+
+ reg_write(ctx->ohci, COMMAND_PTR(ctx->regs),
+ le32_to_cpu(ab.descriptor.branch_address));
+ reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
+ flush_writes(ctx->ohci);
+
+ return 0;
+}
+
+static void context_tasklet(unsigned long data)
+{
+ struct context *ctx = (struct context *) data;
+ struct fw_ohci *ohci = ctx->ohci;
+ struct descriptor *d, *last;
+ u32 address;
+ int z;
+
+ dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
+ ctx->buffer_size, DMA_TO_DEVICE);
+
+ d = ctx->tail_descriptor;
+ last = ctx->tail_descriptor_last;
+
+ while (last->branch_address != 0) {
+ address = le32_to_cpu(last->branch_address);
+ z = address & 0xf;
+ d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
+ last = (z == 2) ? d : d + z - 1;
+
+ if (!ctx->callback(ctx, d, last))
+ break;
+
+ ctx->tail_descriptor = d;
+ ctx->tail_descriptor_last = last;
+ }
+}
+
+static int
+context_init(struct context *ctx, struct fw_ohci *ohci,
+ size_t buffer_size, u32 regs,
+ descriptor_callback_t callback)
+{
+ ctx->ohci = ohci;
+ ctx->regs = regs;
+ ctx->buffer_size = buffer_size;
+ ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (ctx->buffer == NULL)
+ return -ENOMEM;
+
+ tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
+ ctx->callback = callback;
+
+ ctx->buffer_bus =
+ dma_map_single(ohci->card.device, ctx->buffer,
+ buffer_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->buffer_bus)) {
+ kfree(ctx->buffer);
+ return -ENOMEM;
+ }
+
+ ctx->head_descriptor = ctx->buffer;
+ ctx->prev_descriptor = ctx->buffer;
+ ctx->tail_descriptor = ctx->buffer;
+ ctx->tail_descriptor_last = ctx->buffer;
+
+ /*
+ * We put a dummy descriptor in the buffer that has a NULL
+ * branch address and looks like it's been sent. That way we
+ * have a descriptor to append DMA programs to. Also, the
+ * ring buffer invariant is that it always has at least one
+ * element so that head == tail means buffer full.
+ */
+
+ memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
+ ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+ ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
+ ctx->head_descriptor++;
+
+ return 0;
+}
+
+static void
+context_release(struct context *ctx)
+{
+ struct fw_card *card = &ctx->ohci->card;
+
+ dma_unmap_single(card->device, ctx->buffer_bus,
+ ctx->buffer_size, DMA_TO_DEVICE);
+ kfree(ctx->buffer);
+}
+
+static struct descriptor *
+context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
+{
+ struct descriptor *d, *tail, *end;
+
+ d = ctx->head_descriptor;
+ tail = ctx->tail_descriptor;
+ end = ctx->buffer + ctx->buffer_size / sizeof(*d);
+
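+ /*
+ * The allocation succeeds in three cases: the block fits before
+ * the tail, it fits between the head and the end of the buffer,
+ * or the head can wrap back to the start of the buffer while
+ * still leaving room before the tail.
+ */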
+ if (d + z <= tail) {
+ goto has_space;
+ } else if (d > tail && d + z <= end) {
+ goto has_space;
+ } else if (d > tail && ctx->buffer + z <= tail) {
+ d = ctx->buffer;
+ goto has_space;
+ }
+
+ return NULL;
+
+ has_space:
+ memset(d, 0, z * sizeof(*d));
+ *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+
+ return d;
+}
+
+static void context_run(struct context *ctx, u32 extra)
+{
+ struct fw_ohci *ohci = ctx->ohci;
+
+ reg_write(ohci, COMMAND_PTR(ctx->regs),
+ le32_to_cpu(ctx->tail_descriptor_last->branch_address));
+ reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+ reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+ flush_writes(ohci);
+}
+
+static void context_append(struct context *ctx,
+ struct descriptor *d, int z, int extra)
+{
+ dma_addr_t d_bus;
+
+ d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
+
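+ /*
+ * The low four bits of a branch address encode Z, the number of
+ * descriptors in the block being linked in; the controller reads
+ * Z to know how many descriptors to fetch at that address.
+ */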
+ ctx->head_descriptor = d + z + extra;
+ ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
+ ctx->prev_descriptor = z == 2 ? d : d + z - 1;
+
+ dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
+ ctx->buffer_size, DMA_TO_DEVICE);
+
+ reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+ flush_writes(ctx->ohci);
+}
+
+static void context_stop(struct context *ctx)
+{
+ u32 reg;
+ int i;
+
+ reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+ flush_writes(ctx->ohci);
+
+ for (i = 0; i < 10; i++) {
+ reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
+ if ((reg & CONTEXT_ACTIVE) == 0)
+ break;
+
+ fw_notify("context_stop: still active (0x%08x)\n", reg);
+ msleep(1);
+ }
+}
+
+struct driver_data {
+ struct fw_packet *packet;
+};
+
+/*
+ * This function appends a packet to the DMA queue for transmission.
+ * Must always be called with the ohci->lock held to ensure proper
+ * generation handling and locking around packet queue manipulation.
+ */
+static int
+at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
+{
+ struct fw_ohci *ohci = ctx->ohci;
+ dma_addr_t d_bus, payload_bus;
+ struct driver_data *driver_data;
+ struct descriptor *d, *last;
+ __le32 *header;
+ int z, tcode;
+ u32 reg;
+
+ d = context_get_descriptors(ctx, 4, &d_bus);
+ if (d == NULL) {
+ packet->ack = RCODE_SEND_ERROR;
+ return -1;
+ }
+
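+ /*
+ * The DMA program uses up to four descriptor slots: d[0] is the
+ * immediate-key descriptor, d[1] holds the packet header quadlets,
+ * d[2] (when there is a payload) points at the payload buffer, and
+ * d[3] is borrowed as scratch space for the driver_data pointer.
+ */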
+ d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
+ d[0].res_count = cpu_to_le16(packet->timestamp);
+
+ /*
+ * The DMA format for asynchronous link packets is different
+ * from the IEEE1394 layout, so shift the fields around
+ * accordingly. If header_length is 8, it's a PHY packet, to
+ * which we need to prepend an extra quadlet.
+ */
+
+ header = (__le32 *) &d[1];
+ if (packet->header_length > 8) {
+ header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+ (packet->speed << 16));
+ header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
+ (packet->header[0] & 0xffff0000));
+ header[2] = cpu_to_le32(packet->header[2]);
+
+ tcode = (packet->header[0] >> 4) & 0x0f;
+ if (TCODE_IS_BLOCK_PACKET(tcode))
+ header[3] = cpu_to_le32(packet->header[3]);
+ else
+ header[3] = (__force __le32) packet->header[3];
+
+ d[0].req_count = cpu_to_le16(packet->header_length);
+ } else {
+ header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
+ (packet->speed << 16));
+ header[1] = cpu_to_le32(packet->header[0]);
+ header[2] = cpu_to_le32(packet->header[1]);
+ d[0].req_count = cpu_to_le16(12);
+ }
+
+ driver_data = (struct driver_data *) &d[3];
+ driver_data->packet = packet;
+ packet->driver_data = driver_data;
+
+ if (packet->payload_length > 0) {
+ payload_bus =
+ dma_map_single(ohci->card.device, packet->payload,
+ packet->payload_length, DMA_TO_DEVICE);
+ if (dma_mapping_error(payload_bus)) {
+ packet->ack = RCODE_SEND_ERROR;
+ return -1;
+ }
+
+ d[2].req_count = cpu_to_le16(packet->payload_length);
+ d[2].data_address = cpu_to_le32(payload_bus);
+ last = &d[2];
+ z = 3;
+ } else {
+ last = &d[0];
+ z = 2;
+ }
+
+ last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+ DESCRIPTOR_IRQ_ALWAYS |
+ DESCRIPTOR_BRANCH_ALWAYS);
+
+ /* FIXME: Document how the locking works. */
+ if (ohci->generation != packet->generation) {
+ packet->ack = RCODE_GENERATION;
+ return -1;
+ }
+
+ context_append(ctx, d, z, 4 - z);
+
+ /* If the context isn't already running, start it up. */
+ reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
+ if ((reg & CONTEXT_RUN) == 0)
+ context_run(ctx, 0);
+
+ return 0;
+}
+
+static int handle_at_packet(struct context *context,
+ struct descriptor *d,
+ struct descriptor *last)
+{
+ struct driver_data *driver_data;
+ struct fw_packet *packet;
+ struct fw_ohci *ohci = context->ohci;
+ dma_addr_t payload_bus;
+ int evt;
+
+ if (last->transfer_status == 0)
+ /* This descriptor isn't done yet, stop iteration. */
+ return 0;
+
+ driver_data = (struct driver_data *) &d[3];
+ packet = driver_data->packet;
+ if (packet == NULL)
+ /* This packet was cancelled, just continue. */
+ return 1;
+
+ payload_bus = le32_to_cpu(last->data_address);
+ if (payload_bus != 0)
+ dma_unmap_single(ohci->card.device, payload_bus,
+ packet->payload_length, DMA_TO_DEVICE);
+
+ evt = le16_to_cpu(last->transfer_status) & 0x1f;
+ packet->timestamp = le16_to_cpu(last->res_count);
+
+ switch (evt) {
+ case OHCI1394_evt_timeout:
+ /* Async response transmit timed out. */
+ packet->ack = RCODE_CANCELLED;
+ break;
+
+ case OHCI1394_evt_flushed:
+ /*
+ * A packet that was flushed should give the same error as
+ * one sent with a stale generation count.
+ */
+ packet->ack = RCODE_GENERATION;
+ break;
+
+ case OHCI1394_evt_missing_ack:
+ /*
+ * Using a valid (current) generation count, but the
+ * node is not on the bus or not sending acks.
+ */
+ packet->ack = RCODE_NO_ACK;
+ break;
+
+ case ACK_COMPLETE + 0x10:
+ case ACK_PENDING + 0x10:
+ case ACK_BUSY_X + 0x10:
+ case ACK_BUSY_A + 0x10:
+ case ACK_BUSY_B + 0x10:
+ case ACK_DATA_ERROR + 0x10:
+ case ACK_TYPE_ERROR + 0x10:
+ packet->ack = evt - 0x10;
+ break;
+
+ default:
+ packet->ack = RCODE_SEND_ERROR;
+ break;
+ }
+
+ packet->callback(packet, &ohci->card, packet->ack);
+
+ return 1;
+}
+
+#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
+#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
+
+static void
+handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+{
+ struct fw_packet response;
+ int tcode, length, i;
+
+ tcode = HEADER_GET_TCODE(packet->header[0]);
+ if (TCODE_IS_BLOCK_PACKET(tcode))
+ length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+ else
+ length = 4;
+
+ i = csr - CSR_CONFIG_ROM;
+ if (i + length > CONFIG_ROM_SIZE) {
+ fw_fill_response(&response, packet->header,
+ RCODE_ADDRESS_ERROR, NULL, 0);
+ } else if (!TCODE_IS_READ_REQUEST(tcode)) {
+ fw_fill_response(&response, packet->header,
+ RCODE_TYPE_ERROR, NULL, 0);
+ } else {
+ fw_fill_response(&response, packet->header, RCODE_COMPLETE,
+ (void *) ohci->config_rom + i, length);
+ }
+
+ fw_core_handle_response(&ohci->card, &response);
+}
+
+static void
+handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
+{
+ struct fw_packet response;
+ int tcode, length, ext_tcode, sel;
+ __be32 *payload, lock_old = 0;
+ u32 lock_arg, lock_data;
+
+ tcode = HEADER_GET_TCODE(packet->header[0]);
+ length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+ payload = packet->payload;
+ ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
+
+ if (tcode == TCODE_LOCK_REQUEST &&
+ ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
+ lock_arg = be32_to_cpu(payload[0]);
+ lock_data = be32_to_cpu(payload[1]);
+ } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
+ lock_arg = 0;
+ lock_data = 0;
+ } else {
+ fw_fill_response(&response, packet->header,
+ RCODE_TYPE_ERROR, NULL, 0);
+ goto out;
+ }
+
+ sel = (csr - CSR_BUS_MANAGER_ID) / 4;
+ reg_write(ohci, OHCI1394_CSRData, lock_data);
+ reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
+ reg_write(ohci, OHCI1394_CSRControl, sel);
+
+ if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
+ lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
+ else
+ fw_notify("swap not done yet\n");
+
+ fw_fill_response(&response, packet->header,
+ RCODE_COMPLETE, &lock_old, sizeof(lock_old));
+ out:
+ fw_core_handle_response(&ohci->card, &response);
+}
+
+static void
+handle_local_request(struct context *ctx, struct fw_packet *packet)
+{
+ u64 offset;
+ u32 csr;
+
+ if (ctx == &ctx->ohci->at_request_ctx) {
+ packet->ack = ACK_PENDING;
+ packet->callback(packet, &ctx->ohci->card, packet->ack);
+ }
+
+ offset =
+ ((unsigned long long)
+ HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
+ packet->header[2];
+ csr = offset - CSR_REGISTER_BASE;
+
+ /* Handle config rom reads. */
+ if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
+ handle_local_rom(ctx->ohci, packet, csr);
+ else switch (csr) {
+ case CSR_BUS_MANAGER_ID:
+ case CSR_BANDWIDTH_AVAILABLE:
+ case CSR_CHANNELS_AVAILABLE_HI:
+ case CSR_CHANNELS_AVAILABLE_LO:
+ handle_local_lock(ctx->ohci, packet, csr);
+ break;
+ default:
+ if (ctx == &ctx->ohci->at_request_ctx)
+ fw_core_handle_request(&ctx->ohci->card, packet);
+ else
+ fw_core_handle_response(&ctx->ohci->card, packet);
+ break;
+ }
+
+ if (ctx == &ctx->ohci->at_response_ctx) {
+ packet->ack = ACK_COMPLETE;
+ packet->callback(packet, &ctx->ohci->card, packet->ack);
+ }
+}
+
+static void
+at_context_transmit(struct context *ctx, struct fw_packet *packet)
+{
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&ctx->ohci->lock, flags);
+
+ if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
+ ctx->ohci->generation == packet->generation) {
+ spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ handle_local_request(ctx, packet);
+ return;
+ }
+
+ retval = at_context_queue_packet(ctx, packet);
+ spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+
+ if (retval < 0)
+ packet->callback(packet, &ctx->ohci->card, packet->ack);
+}
+
+static void bus_reset_tasklet(unsigned long data)
+{
+ struct fw_ohci *ohci = (struct fw_ohci *)data;
+ int self_id_count, i, j, reg;
+ int generation, new_generation;
+ unsigned long flags;
+
+ reg = reg_read(ohci, OHCI1394_NodeID);
+ if (!(reg & OHCI1394_NodeID_idValid)) {
+ fw_error("node ID not valid, new bus reset in progress\n");
+ return;
+ }
+ ohci->node_id = reg & 0xffff;
+
+ /*
+ * The count in the SelfIDCount register is the number of
+ * bytes in the self ID receive buffer. Since we also receive
+ * the inverted quadlets and a header quadlet, we shift one
+ * bit extra to get the actual number of self IDs.
+ */
+
+ self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
+ generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
+
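+ /*
+ * Each self ID quadlet in the receive buffer is followed by its
+ * bitwise inverse, so walk the buffer two quadlets at a time and
+ * verify every pair before copying the self ID out.
+ */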
+ for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
+ if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
+ fw_error("inconsistent self IDs\n");
+ ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
+ }
+
+ /*
+ * Check the consistency of the self IDs we just read. The
+ * problem we face is that a new bus reset can start while we
+ * read out the self IDs from the DMA buffer. If this happens,
+ * the DMA buffer will be overwritten with new self IDs and we
+ * will read out inconsistent data. The OHCI specification
+ * (section 11.2) recommends a technique similar to
+ * linux/seqlock.h, where we remember the generation of the
+ * self IDs in the buffer before reading them out and compare
+ * it to the current generation after reading them out. If
+ * the two generations match we know we have a consistent set
+ * of self IDs.
+ */
+
+ new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
+ if (new_generation != generation) {
+ fw_notify("recursive bus reset detected, "
+ "discarding self ids\n");
+ return;
+ }
+
+ /* FIXME: Document how the locking works. */
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ ohci->generation = generation;
+ context_stop(&ohci->at_request_ctx);
+ context_stop(&ohci->at_response_ctx);
+ reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+
+ /*
+ * This next bit is unrelated to the AT context stuff but we
+ * have to do it under the spinlock also. If a new config rom
+ * was set up before this reset, the old one is now no longer
+ * in use and we can free it. Update the config rom pointers
+ * to point to the current config rom and clear the
+ * next_config_rom pointer so a new update can take place.
+ */
+
+ if (ohci->next_config_rom != NULL) {
+ dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ ohci->config_rom, ohci->config_rom_bus);
+ ohci->config_rom = ohci->next_config_rom;
+ ohci->config_rom_bus = ohci->next_config_rom_bus;
+ ohci->next_config_rom = NULL;
+
+ /*
+ * Restore config_rom image and manually update
+ * config_rom registers. Writing the header quadlet
+ * will indicate that the config rom is ready, so we
+ * do that last.
+ */
+ reg_write(ohci, OHCI1394_BusOptions,
+ be32_to_cpu(ohci->config_rom[2]));
+ ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
+ reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
+ }
+
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
+ self_id_count, ohci->self_id_buffer);
+}
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct fw_ohci *ohci = data;
+ u32 event, iso_event, cycle_time;
+ int i;
+
+ event = reg_read(ohci, OHCI1394_IntEventClear);
+
+ if (!event)
+ return IRQ_NONE;
+
+ reg_write(ohci, OHCI1394_IntEventClear, event);
+
+ if (event & OHCI1394_selfIDComplete)
+ tasklet_schedule(&ohci->bus_reset_tasklet);
+
+ if (event & OHCI1394_RQPkt)
+ tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+
+ if (event & OHCI1394_RSPkt)
+ tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+
+ if (event & OHCI1394_reqTxComplete)
+ tasklet_schedule(&ohci->at_request_ctx.tasklet);
+
+ if (event & OHCI1394_respTxComplete)
+ tasklet_schedule(&ohci->at_response_ctx.tasklet);
+
+ iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+ reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+
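+ /* Schedule the tasklet of each iso receive context with a pending event. */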
+ while (iso_event) {
+ i = ffs(iso_event) - 1;
+ tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
+ iso_event &= ~(1 << i);
+ }
+
+ iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+ reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+
+ while (iso_event) {
+ i = ffs(iso_event) - 1;
+ tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
+ iso_event &= ~(1 << i);
+ }
+
+ if (event & OHCI1394_cycle64Seconds) {
+ cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ if ((cycle_time & 0x80000000) == 0)
+ ohci->bus_seconds++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ struct pci_dev *dev = to_pci_dev(card->device);
+
+ /*
+ * When the link is not yet enabled, the atomic config rom
+ * update mechanism described below in ohci_set_config_rom()
+ * is not active. We have to update ConfigRomHeader and
+ * BusOptions manually, and the write to ConfigROMmap takes
+ * effect immediately. We tie this to the enabling of the
+ * link, so we have a valid config rom before enabling - the
+ * OHCI requires that ConfigROMhdr and BusOptions have valid
+ * values before enabling.
+ *
+ * However, when the ConfigROMmap is written, some controllers
+ * always read back quadlets 0 and 2 from the config rom to
+ * the ConfigRomHeader and BusOptions registers on bus reset.
+ * They shouldn't do that in this initial case where the link
+ * isn't enabled. This means we have to use the same
+ * workaround here, setting the bus header to 0 and then
+ * writing the right values in the bus reset tasklet.
+ */
+
+ ohci->next_config_rom =
+ dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ &ohci->next_config_rom_bus, GFP_KERNEL);
+ if (ohci->next_config_rom == NULL)
+ return -ENOMEM;
+
+ memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
+ fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
+
+ ohci->next_header = config_rom[0];
+ ohci->next_config_rom[0] = 0;
+ reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
+ reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
+ reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+
+ reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
+
+ if (request_irq(dev->irq, irq_handler,
+ IRQF_SHARED, ohci_driver_name, ohci)) {
+ fw_error("Failed to allocate shared interrupt %d.\n",
+ dev->irq);
+ dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ ohci->next_config_rom,
+ ohci->next_config_rom_bus);
+ ohci->next_config_rom = NULL;
+ return -EIO;
+ }
+
+ reg_write(ohci, OHCI1394_HCControlSet,
+ OHCI1394_HCControl_linkEnable |
+ OHCI1394_HCControl_BIBimageValid);
+ flush_writes(ohci);
+
+ /*
+ * We are ready to go, initiate bus reset to finish the
+ * initialization.
+ */
+
+ fw_core_initiate_bus_reset(&ohci->card, 1);
+
+ return 0;
+}
+
+static int
+ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
+{
+ struct fw_ohci *ohci;
+ unsigned long flags;
+ int retval = 0;
+ __be32 *next_config_rom;
+ dma_addr_t next_config_rom_bus;
+
+ ohci = fw_ohci(card);
+
+ /*
+ * When the OHCI controller is enabled, the config rom update
+ * mechanism is a bit tricky, but easy enough to use. See
+ * section 5.5.6 in the OHCI specification.
+ *
+ * The OHCI controller caches the new config rom address in a
+ * shadow register (ConfigROMmapNext) and needs a bus reset
+ * for the changes to take place. When the bus reset is
+ * detected, the controller loads the new values for the
+ * ConfigRomHeader and BusOptions registers from the specified
+ * config rom and loads ConfigROMmap from the ConfigROMmapNext
+ * shadow register. All automatically and atomically.
+ *
+ * Now, there's a twist to this story. The automatic load of
+ * ConfigRomHeader and BusOptions doesn't honor the
+ * noByteSwapData bit, so with a be32 config rom, the
+ * controller will load be32 values in to these registers
+ * during the atomic update, even on little endian
+ * architectures. The workaround we use is to put a 0 in the
+ * header quadlet; 0 is endian agnostic and means that the
+ * config rom isn't ready yet. In the bus reset tasklet we
+ * then set up the real values for the two registers.
+ *
+ * We use ohci->lock to avoid racing with the code that sets
+ * ohci->next_config_rom to NULL (see bus_reset_tasklet).
+ */
+
+ next_config_rom =
+ dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ &next_config_rom_bus, GFP_KERNEL);
+ if (next_config_rom == NULL)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ if (ohci->next_config_rom == NULL) {
+ ohci->next_config_rom = next_config_rom;
+ ohci->next_config_rom_bus = next_config_rom_bus;
+
+ memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
+ fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
+ length * 4);
+
+ ohci->next_header = config_rom[0];
+ ohci->next_config_rom[0] = 0;
+
+ reg_write(ohci, OHCI1394_ConfigROMmap,
+ ohci->next_config_rom_bus);
+ } else {
+ dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ next_config_rom, next_config_rom_bus);
+ retval = -EBUSY;
+ }
+
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ /*
+ * Now initiate a bus reset to have the changes take
+ * effect. We clean up the old config rom memory and DMA
+ * mappings in the bus reset tasklet, since the OHCI
+ * controller could need to access it before the bus reset
+ * takes effect.
+ */
+ if (retval == 0)
+ fw_core_initiate_bus_reset(&ohci->card, 1);
+
+ return retval;
+}
+
+static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+
+ at_context_transmit(&ohci->at_request_ctx, packet);
+}
+
+static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+
+ at_context_transmit(&ohci->at_response_ctx, packet);
+}
+
+static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ struct context *ctx = &ohci->at_request_ctx;
+ struct driver_data *driver_data = packet->driver_data;
+ int retval = -ENOENT;
+
+ tasklet_disable(&ctx->tasklet);
+
+ if (packet->ack != 0)
+ goto out;
+
+ driver_data->packet = NULL;
+ packet->ack = RCODE_CANCELLED;
+ packet->callback(packet, &ohci->card, packet->ack);
+ retval = 0;
+
+ out:
+ tasklet_enable(&ctx->tasklet);
+
+ return retval;
+}
+
+static int
+ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ unsigned long flags;
+ int n, retval = 0;
+
+ /*
+ * FIXME: Make sure this bitmask is cleared when we clear the busReset
+ * interrupt bit. Clear physReqResourceAllBuses on bus reset.
+ */
+
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ if (ohci->generation != generation) {
+ retval = -ESTALE;
+ goto out;
+ }
+
+ /*
+ * Note, if the node ID contains a non-local bus ID, physical DMA is
+ * enabled for _all_ nodes on remote buses.
+ */
+
+ n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
+ if (n < 32)
+ reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
+ else
+ reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
+
+ flush_writes(ohci);
+ out:
+ spin_unlock_irqrestore(&ohci->lock, flags);
+ return retval;
+}
+
+static u64
+ohci_get_bus_time(struct fw_card *card)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ u32 cycle_time;
+ u64 bus_time;
+
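+ /*
+ * bus_seconds, maintained from the cycle64Seconds interrupt,
+ * supplies the upper 32 bits of the bus time; the cycle timer
+ * register supplies the lower 32.
+ */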
+ cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
+
+ return bus_time;
+}
+
+static int handle_ir_dualbuffer_packet(struct context *context,
+ struct descriptor *d,
+ struct descriptor *last)
+{
+ struct iso_context *ctx =
+ container_of(context, struct iso_context, context);
+ struct db_descriptor *db = (struct db_descriptor *) d;
+ __le32 *ir_header;
+ size_t header_length;
+ void *p, *end;
+ int i;
+
+ if (db->first_res_count > 0 && db->second_res_count > 0)
+ /* This descriptor isn't done yet, stop iteration. */
+ return 0;
+
+ header_length = le16_to_cpu(db->first_req_count) -
+ le16_to_cpu(db->first_res_count);
+
+ i = ctx->header_length;
+ p = db + 1;
+ end = p + header_length;
+ while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
+ /*
+ * The iso header is byteswapped to little endian by
+ * the controller, but the remaining header quadlets
+ * are big endian. We want to present all the headers
+ * as big endian, so we have to swap the first
+ * quadlet.
+ */
+ *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+ memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
+ i += ctx->base.header_size;
+ p += ctx->base.header_size + 4;
+ }
+
+ ctx->header_length = i;
+
+ if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
+ ir_header = (__le32 *) (db + 1);
+ ctx->base.callback(&ctx->base,
+ le32_to_cpu(ir_header[0]) & 0xffff,
+ ctx->header_length, ctx->header,
+ ctx->base.callback_data);
+ ctx->header_length = 0;
+ }
+
+ return 1;
+}
+
+static int handle_it_packet(struct context *context,
+ struct descriptor *d,
+ struct descriptor *last)
+{
+ struct iso_context *ctx =
+ container_of(context, struct iso_context, context);
+
+ if (last->transfer_status == 0)
+ /* This descriptor isn't done yet, stop iteration. */
+ return 0;
+
+ if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+ ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
+ 0, NULL, ctx->base.callback_data);
+
+ return 1;
+}
+
+static struct fw_iso_context *
+ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ struct iso_context *ctx, *list;
+ descriptor_callback_t callback;
+ u32 *mask, regs;
+ unsigned long flags;
+ int index, retval = -ENOMEM;
+
+ if (type == FW_ISO_CONTEXT_TRANSMIT) {
+ mask = &ohci->it_context_mask;
+ list = ohci->it_context_list;
+ callback = handle_it_packet;
+ } else {
+ mask = &ohci->ir_context_mask;
+ list = ohci->ir_context_list;
+ callback = handle_ir_dualbuffer_packet;
+ }
+
+ /* FIXME: We need a fallback for pre 1.1 OHCI. */
+ if (callback == handle_ir_dualbuffer_packet &&
+ ohci->version < OHCI_VERSION_1_1)
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&ohci->lock, flags);
+ index = ffs(*mask) - 1;
+ if (index >= 0)
+ *mask &= ~(1 << index);
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ if (index < 0)
+ return ERR_PTR(-EBUSY);
+
+ if (type == FW_ISO_CONTEXT_TRANSMIT)
+ regs = OHCI1394_IsoXmitContextBase(index);
+ else
+ regs = OHCI1394_IsoRcvContextBase(index);
+
+ ctx = &list[index];
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->header_length = 0;
+ ctx->header = (void *) __get_free_page(GFP_KERNEL);
+ if (ctx->header == NULL)
+ goto out;
+
+ retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
+ regs, callback);
+ if (retval < 0)
+ goto out_with_header;
+
+ return &ctx->base;
+
+ out_with_header:
+ free_page((unsigned long)ctx->header);
+ out:
+ spin_lock_irqsave(&ohci->lock, flags);
+ *mask |= 1 << index;
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ return ERR_PTR(retval);
+}
+
+static int ohci_start_iso(struct fw_iso_context *base,
+ s32 cycle, u32 sync, u32 tags)
+{
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ struct fw_ohci *ohci = ctx->context.ohci;
+ u32 control, match;
+ int index;
+
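+ /*
+ * For transmit contexts a non-negative cycle arms cycle-match
+ * start; for receive contexts the ContextMatch register filters
+ * on tag set, sync field and channel number.
+ */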
+ if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ index = ctx - ohci->it_context_list;
+ match = 0;
+ if (cycle >= 0)
+ match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
+ (cycle & 0x7fff) << 16;
+
+ reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
+ context_run(&ctx->context, match);
+ } else {
+ index = ctx - ohci->ir_context_list;
+ control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
+ match = (tags << 28) | (sync << 8) | ctx->base.channel;
+ if (cycle >= 0) {
+ match |= (cycle & 0x07fff) << 12;
+ control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
+ }
+
+ reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
+ reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
+ reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
+ context_run(&ctx->context, control);
+ }
+
+ return 0;
+}
+
+static int ohci_stop_iso(struct fw_iso_context *base)
+{
+ struct fw_ohci *ohci = fw_ohci(base->card);
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ int index;
+
+ if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ index = ctx - ohci->it_context_list;
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
+ } else {
+ index = ctx - ohci->ir_context_list;
+ reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+ }
+ flush_writes(ohci);
+ context_stop(&ctx->context);
+
+ return 0;
+}
+
+static void ohci_free_iso_context(struct fw_iso_context *base)
+{
+ struct fw_ohci *ohci = fw_ohci(base->card);
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ unsigned long flags;
+ int index;
+
+ ohci_stop_iso(base);
+ context_release(&ctx->context);
+ free_page((unsigned long)ctx->header);
+
+ spin_lock_irqsave(&ohci->lock, flags);
+
+ if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+ index = ctx - ohci->it_context_list;
+ ohci->it_context_mask |= 1 << index;
+ } else {
+ index = ctx - ohci->ir_context_list;
+ ohci->ir_context_mask |= 1 << index;
+ }
+
+ spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
+static int
+ohci_queue_iso_transmit(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
+{
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ struct descriptor *d, *last, *pd;
+ struct fw_iso_packet *p;
+ __le32 *header;
+ dma_addr_t d_bus, page_bus;
+ u32 z, header_z, payload_z, irq;
+ u32 payload_index, payload_end_index, next_page_index;
+ int page, end_page, i, length, offset;
+
+ /*
+ * FIXME: Cycle lost behavior should be configurable: lose
+ * packet, retransmit or terminate.
+ */
+
+ p = packet;
+ payload_index = payload;
+
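+ /*
+ * z counts the descriptor slots in this packet's DMA program: a
+ * skip packet needs one, an immediate iso header needs two (the
+ * second slot carries the header quadlets), an extra header block
+ * adds one more, and each payload page crossed adds another.
+ */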
+ if (p->skip)
+ z = 1;
+ else
+ z = 2;
+ if (p->header_length > 0)
+ z++;
+
+ /* Determine the first page the payload isn't contained in. */
+ end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
+ if (p->payload_length > 0)
+ payload_z = end_page - (payload_index >> PAGE_SHIFT);
+ else
+ payload_z = 0;
+
+ z += payload_z;
+
+ /* Get header size in number of descriptors. */
+ header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
+
+ d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
+ if (d == NULL)
+ return -ENOMEM;
+
+ if (!p->skip) {
+ d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
+ d[0].req_count = cpu_to_le16(8);
+
+ header = (__le32 *) &d[1];
+ header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
+ IT_HEADER_TAG(p->tag) |
+ IT_HEADER_TCODE(TCODE_STREAM_DATA) |
+ IT_HEADER_CHANNEL(ctx->base.channel) |
+ IT_HEADER_SPEED(ctx->base.speed));
+ header[1] =
+ cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
+ p->payload_length));
+ }
+
+ if (p->header_length > 0) {
+ d[2].req_count = cpu_to_le16(p->header_length);
+ d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
+ memcpy(&d[z], p->header, p->header_length);
+ }
+
+ pd = d + z - payload_z;
+ payload_end_index = payload_index + p->payload_length;
+ for (i = 0; i < payload_z; i++) {
+ page = payload_index >> PAGE_SHIFT;
+ offset = payload_index & ~PAGE_MASK;
+ next_page_index = (page + 1) << PAGE_SHIFT;
+ length =
+ min(next_page_index, payload_end_index) - payload_index;
+ pd[i].req_count = cpu_to_le16(length);
+
+ page_bus = page_private(buffer->pages[page]);
+ pd[i].data_address = cpu_to_le32(page_bus + offset);
+
+ payload_index += length;
+ }
+
+ if (p->interrupt)
+ irq = DESCRIPTOR_IRQ_ALWAYS;
+ else
+ irq = DESCRIPTOR_NO_IRQ;
+
+ last = z == 2 ? d : d + z - 1;
+ last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+ DESCRIPTOR_STATUS |
+ DESCRIPTOR_BRANCH_ALWAYS |
+ irq);
+
+ context_append(&ctx->context, d, z, header_z);
+
+ return 0;
+}
+
+static int
+ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
+{
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ struct db_descriptor *db = NULL;
+ struct descriptor *d;
+ struct fw_iso_packet *p;
+ dma_addr_t d_bus, page_bus;
+ u32 z, header_z, length, rest;
+ int page, offset, packet_count, header_size;
+
+ /*
+ * FIXME: Cycle lost behavior should be configurable: lose
+ * packet, retransmit or terminate.
+ */
+
+ if (packet->skip) {
+ d = context_get_descriptors(&ctx->context, 2, &d_bus);
+ if (d == NULL)
+ return -ENOMEM;
+
+ db = (struct db_descriptor *) d;
+ db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+ DESCRIPTOR_BRANCH_ALWAYS |
+ DESCRIPTOR_WAIT);
+ db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+ context_append(&ctx->context, d, 2, 0);
+ }
+
+ p = packet;
+ z = 2;
+
+ /*
+ * The OHCI controller puts the status word in the header
+ * buffer too, so we need 4 extra bytes per packet.
+ */
+ packet_count = p->header_length / ctx->base.header_size;
+ header_size = packet_count * (ctx->base.header_size + 4);
+
+ /* Get header size in number of descriptors. */
+ header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+ page = payload >> PAGE_SHIFT;
+ offset = payload & ~PAGE_MASK;
+ rest = p->payload_length;
+
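+ /*
+ * In dual-buffer mode each descriptor splits a received packet:
+ * the iso headers land in the first buffer, allocated right after
+ * the descriptor itself, while the payload goes to the second
+ * buffer, which points into the caller's page set.
+ */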
+ /* FIXME: OHCI 1.0 doesn't support dual buffer receive */
+ /* FIXME: make packet-per-buffer/dual-buffer a context option */
+ while (rest > 0) {
+ d = context_get_descriptors(&ctx->context,
+ z + header_z, &d_bus);
+ if (d == NULL)
+ return -ENOMEM;
+
+ db = (struct db_descriptor *) d;
+ db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+ DESCRIPTOR_BRANCH_ALWAYS);
+ db->first_size = cpu_to_le16(ctx->base.header_size + 4);
+ db->first_req_count = cpu_to_le16(header_size);
+ db->first_res_count = db->first_req_count;
+ db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
+
+ if (offset + rest < PAGE_SIZE)
+ length = rest;
+ else
+ length = PAGE_SIZE - offset;
+
+ db->second_req_count = cpu_to_le16(length);
+ db->second_res_count = db->second_req_count;
+ page_bus = page_private(buffer->pages[page]);
+ db->second_buffer = cpu_to_le32(page_bus + offset);
+
+ if (p->interrupt && length == rest)
+ db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+ context_append(&ctx->context, d, z, header_z);
+ offset = (offset + length) & ~PAGE_MASK;
+ rest -= length;
+ page++;
+ }
+
+ return 0;
+}
+
+static int
+ohci_queue_iso(struct fw_iso_context *base,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload)
+{
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+
+ if (base->type == FW_ISO_CONTEXT_TRANSMIT)
+ return ohci_queue_iso_transmit(base, packet, buffer, payload);
+ else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
+ return ohci_queue_iso_receive_dualbuffer(base, packet,
+ buffer, payload);
+ else
+ /* FIXME: Implement fallback for OHCI 1.0 controllers. */
+ return -EINVAL;
+}
+
+static const struct fw_card_driver ohci_driver = {
+ .name = ohci_driver_name,
+ .enable = ohci_enable,
+ .update_phy_reg = ohci_update_phy_reg,
+ .set_config_rom = ohci_set_config_rom,
+ .send_request = ohci_send_request,
+ .send_response = ohci_send_response,
+ .cancel_packet = ohci_cancel_packet,
+ .enable_phys_dma = ohci_enable_phys_dma,
+ .get_bus_time = ohci_get_bus_time,
+
+ .allocate_iso_context = ohci_allocate_iso_context,
+ .free_iso_context = ohci_free_iso_context,
+ .queue_iso = ohci_queue_iso,
+ .start_iso = ohci_start_iso,
+ .stop_iso = ohci_stop_iso,
+};
+
+static int software_reset(struct fw_ohci *ohci)
+{
+ int i;
+
+ reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
+
+ for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+ if ((reg_read(ohci, OHCI1394_HCControlSet) &
+ OHCI1394_HCControl_softReset) == 0)
+ return 0;
+ msleep(1);
+ }
+
+ return -EBUSY;
+}
+
+static int __devinit
+pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct fw_ohci *ohci;
+ u32 bus_options, max_receive, link_speed;
+ u64 guid;
+ int err;
+ size_t size;
+
+ ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
+ if (ohci == NULL) {
+ fw_error("Could not malloc fw_ohci data.\n");
+ return -ENOMEM;
+ }
+
+ fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
+
+ err = pci_enable_device(dev);
+ if (err) {
+ fw_error("Failed to enable OHCI hardware.\n");
+ goto fail_put_card;
+ }
+
+ pci_set_master(dev);
+ pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
+ pci_set_drvdata(dev, ohci);
+
+ spin_lock_init(&ohci->lock);
+
+ tasklet_init(&ohci->bus_reset_tasklet,
+ bus_reset_tasklet, (unsigned long)ohci);
+
+ err = pci_request_region(dev, 0, ohci_driver_name);
+ if (err) {
+ fw_error("MMIO resource unavailable\n");
+ goto fail_disable;
+ }
+
+ ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
+ if (ohci->registers == NULL) {
+ fw_error("Failed to remap registers\n");
+ err = -ENXIO;
+ goto fail_iomem;
+ }
+
+ if (software_reset(ohci)) {
+ fw_error("Failed to reset ohci card.\n");
+ err = -EBUSY;
+ goto fail_registers;
+ }
+
+ /*
+ * Now enable LPS, which we need in order to start accessing
+ * most of the registers. In fact, on some cards (ALI M5251),
+ * accessing registers in the SClk domain without LPS enabled
+ * will lock up the machine. Wait 50 msec to make sure we have
+ * the link fully enabled.
+ */
+ reg_write(ohci, OHCI1394_HCControlSet,
+ OHCI1394_HCControl_LPS |
+ OHCI1394_HCControl_postedWriteEnable);
+ flush_writes(ohci);
+ msleep(50);
+
+ reg_write(ohci, OHCI1394_HCControlClear,
+ OHCI1394_HCControl_noByteSwapData);
+
+ reg_write(ohci, OHCI1394_LinkControlSet,
+ OHCI1394_LinkControl_rcvSelfID |
+ OHCI1394_LinkControl_cycleTimerEnable |
+ OHCI1394_LinkControl_cycleMaster);
+
+ ar_context_init(&ohci->ar_request_ctx, ohci,
+ OHCI1394_AsReqRcvContextControlSet);
+
+ ar_context_init(&ohci->ar_response_ctx, ohci,
+ OHCI1394_AsRspRcvContextControlSet);
+
+ context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
+ OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+
+ context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
+ OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+
+ reg_write(ohci, OHCI1394_ATRetries,
+ OHCI1394_MAX_AT_REQ_RETRIES |
+ (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
+ (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
+
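+ /*
+ * Writing all ones to an IntMask register and reading it back
+ * reveals which isochronous contexts this controller implements;
+ * use that to size the context lists.
+ */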
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+ ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+ size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
+ ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+
+ reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
+ ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+ reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
+ size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
+ ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+
+ if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
+ fw_error("Out of memory for it/ir contexts.\n");
+ err = -ENOMEM;
+ goto fail_registers;
+ }
+
+ /* self-id dma buffer allocation */
+ ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
+ SELF_ID_BUF_SIZE,
+ &ohci->self_id_bus,
+ GFP_KERNEL);
+ if (ohci->self_id_cpu == NULL) {
+ fw_error("Out of memory for self ID buffer.\n");
+ err = -ENOMEM;
+ goto fail_registers;
+ }
+
+ reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
+ reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
+ reg_write(ohci, OHCI1394_IntEventClear, ~0);
+ reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+ reg_write(ohci, OHCI1394_IntMaskSet,
+ OHCI1394_selfIDComplete |
+ OHCI1394_RQPkt | OHCI1394_RSPkt |
+ OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
+ OHCI1394_isochRx | OHCI1394_isochTx |
+ OHCI1394_masterIntEnable |
+ OHCI1394_cycle64Seconds);
+
+ bus_options = reg_read(ohci, OHCI1394_BusOptions);
+ max_receive = (bus_options >> 12) & 0xf;
+ link_speed = bus_options & 0x7;
+ guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
+ reg_read(ohci, OHCI1394_GUIDLo);
+
+ err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
+ if (err < 0)
+ goto fail_self_id;
+
+ ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+ fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
+ dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
+
+ return 0;
+
+ fail_self_id:
+ dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
+ ohci->self_id_cpu, ohci->self_id_bus);
+ fail_registers:
+ kfree(ohci->it_context_list);
+ kfree(ohci->ir_context_list);
+ pci_iounmap(dev, ohci->registers);
+ fail_iomem:
+ pci_release_region(dev, 0);
+ fail_disable:
+ pci_disable_device(dev);
+ fail_put_card:
+ fw_card_put(&ohci->card);
+
+ return err;
+}
+
+static void pci_remove(struct pci_dev *dev)
+{
+ struct fw_ohci *ohci;
+
+ ohci = pci_get_drvdata(dev);
+ reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+ flush_writes(ohci);
+ fw_core_remove_card(&ohci->card);
+
+ /*
+ * FIXME: Fail all pending packets here, now that the upper
+ * layers can't queue any more.
+ */
+
+ software_reset(ohci);
+ free_irq(dev->irq, ohci);
+ dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
+ ohci->self_id_cpu, ohci->self_id_bus);
+ kfree(ohci->it_context_list);
+ kfree(ohci->ir_context_list);
+ pci_iounmap(dev, ohci->registers);
+ pci_release_region(dev, 0);
+ pci_disable_device(dev);
+ fw_card_put(&ohci->card);
+
+ fw_notify("Removed fw-ohci device.\n");
+}
+
+static struct pci_device_id pci_table[] = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+static struct pci_driver fw_ohci_pci_driver = {
+ .name = ohci_driver_name,
+ .id_table = pci_table,
+ .probe = pci_probe,
+ .remove = pci_remove,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
+MODULE_LICENSE("GPL");
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
+MODULE_ALIAS("ohci1394");
+#endif
+
+static int __init fw_ohci_init(void)
+{
+ return pci_register_driver(&fw_ohci_pci_driver);
+}
+
+static void __exit fw_ohci_cleanup(void)
+{
+ pci_unregister_driver(&fw_ohci_pci_driver);
+}
+
+module_init(fw_ohci_init);
+module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
new file mode 100644
index 00000000000..fa15706397d
--- /dev/null
+++ b/drivers/firewire/fw-ohci.h
@@ -0,0 +1,153 @@
+#ifndef __fw_ohci_h
+#define __fw_ohci_h
+
+/* OHCI register map */
+
+#define OHCI1394_Version 0x000
+#define OHCI1394_GUID_ROM 0x004
+#define OHCI1394_ATRetries 0x008
+#define OHCI1394_CSRData 0x00C
+#define OHCI1394_CSRCompareData 0x010
+#define OHCI1394_CSRControl 0x014
+#define OHCI1394_ConfigROMhdr 0x018
+#define OHCI1394_BusID 0x01C
+#define OHCI1394_BusOptions 0x020
+#define OHCI1394_GUIDHi 0x024
+#define OHCI1394_GUIDLo 0x028
+#define OHCI1394_ConfigROMmap 0x034
+#define OHCI1394_PostedWriteAddressLo 0x038
+#define OHCI1394_PostedWriteAddressHi 0x03C
+#define OHCI1394_VendorID 0x040
+#define OHCI1394_HCControlSet 0x050
+#define OHCI1394_HCControlClear 0x054
+#define OHCI1394_HCControl_BIBimageValid 0x80000000
+#define OHCI1394_HCControl_noByteSwapData 0x40000000
+#define OHCI1394_HCControl_programPhyEnable 0x00800000
+#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000
+#define OHCI1394_HCControl_LPS 0x00080000
+#define OHCI1394_HCControl_postedWriteEnable 0x00040000
+#define OHCI1394_HCControl_linkEnable 0x00020000
+#define OHCI1394_HCControl_softReset 0x00010000
+#define OHCI1394_SelfIDBuffer 0x064
+#define OHCI1394_SelfIDCount 0x068
+#define OHCI1394_IRMultiChanMaskHiSet 0x070
+#define OHCI1394_IRMultiChanMaskHiClear 0x074
+#define OHCI1394_IRMultiChanMaskLoSet 0x078
+#define OHCI1394_IRMultiChanMaskLoClear 0x07C
+#define OHCI1394_IntEventSet 0x080
+#define OHCI1394_IntEventClear 0x084
+#define OHCI1394_IntMaskSet 0x088
+#define OHCI1394_IntMaskClear 0x08C
+#define OHCI1394_IsoXmitIntEventSet 0x090
+#define OHCI1394_IsoXmitIntEventClear 0x094
+#define OHCI1394_IsoXmitIntMaskSet 0x098
+#define OHCI1394_IsoXmitIntMaskClear 0x09C
+#define OHCI1394_IsoRecvIntEventSet 0x0A0
+#define OHCI1394_IsoRecvIntEventClear 0x0A4
+#define OHCI1394_IsoRecvIntMaskSet 0x0A8
+#define OHCI1394_IsoRecvIntMaskClear 0x0AC
+#define OHCI1394_InitialBandwidthAvailable 0x0B0
+#define OHCI1394_InitialChannelsAvailableHi 0x0B4
+#define OHCI1394_InitialChannelsAvailableLo 0x0B8
+#define OHCI1394_FairnessControl 0x0DC
+#define OHCI1394_LinkControlSet 0x0E0
+#define OHCI1394_LinkControlClear 0x0E4
+#define OHCI1394_LinkControl_rcvSelfID (1 << 9)
+#define OHCI1394_LinkControl_rcvPhyPkt (1 << 10)
+#define OHCI1394_LinkControl_cycleTimerEnable (1 << 20)
+#define OHCI1394_LinkControl_cycleMaster (1 << 21)
+#define OHCI1394_LinkControl_cycleSource (1 << 22)
+#define OHCI1394_NodeID 0x0E8
+#define OHCI1394_NodeID_idValid 0x80000000
+#define OHCI1394_PhyControl 0x0EC
+#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000)
+#define OHCI1394_PhyControl_ReadDone 0x80000000
+#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
+#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
+#define OHCI1394_PhyControl_WriteDone 0x00004000
+#define OHCI1394_IsochronousCycleTimer 0x0F0
+#define OHCI1394_AsReqFilterHiSet 0x100
+#define OHCI1394_AsReqFilterHiClear 0x104
+#define OHCI1394_AsReqFilterLoSet 0x108
+#define OHCI1394_AsReqFilterLoClear 0x10C
+#define OHCI1394_PhyReqFilterHiSet 0x110
+#define OHCI1394_PhyReqFilterHiClear 0x114
+#define OHCI1394_PhyReqFilterLoSet 0x118
+#define OHCI1394_PhyReqFilterLoClear 0x11C
+#define OHCI1394_PhyUpperBound 0x120
+
+#define OHCI1394_AsReqTrContextBase 0x180
+#define OHCI1394_AsReqTrContextControlSet 0x180
+#define OHCI1394_AsReqTrContextControlClear 0x184
+#define OHCI1394_AsReqTrCommandPtr 0x18C
+
+#define OHCI1394_AsRspTrContextBase 0x1A0
+#define OHCI1394_AsRspTrContextControlSet 0x1A0
+#define OHCI1394_AsRspTrContextControlClear 0x1A4
+#define OHCI1394_AsRspTrCommandPtr 0x1AC
+
+#define OHCI1394_AsReqRcvContextBase 0x1C0
+#define OHCI1394_AsReqRcvContextControlSet 0x1C0
+#define OHCI1394_AsReqRcvContextControlClear 0x1C4
+#define OHCI1394_AsReqRcvCommandPtr 0x1CC
+
+#define OHCI1394_AsRspRcvContextBase 0x1E0
+#define OHCI1394_AsRspRcvContextControlSet 0x1E0
+#define OHCI1394_AsRspRcvContextControlClear 0x1E4
+#define OHCI1394_AsRspRcvCommandPtr 0x1EC
+
+/* Isochronous transmit registers */
+#define OHCI1394_IsoXmitContextBase(n) (0x200 + 16 * (n))
+#define OHCI1394_IsoXmitContextControlSet(n) (0x200 + 16 * (n))
+#define OHCI1394_IsoXmitContextControlClear(n) (0x204 + 16 * (n))
+#define OHCI1394_IsoXmitCommandPtr(n) (0x20C + 16 * (n))
+
+/* Isochronous receive registers */
+#define OHCI1394_IsoRcvContextBase(n) (0x400 + 32 * (n))
+#define OHCI1394_IsoRcvContextControlSet(n) (0x400 + 32 * (n))
+#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n))
+#define OHCI1394_IsoRcvCommandPtr(n) (0x40C + 32 * (n))
+#define OHCI1394_IsoRcvContextMatch(n) (0x410 + 32 * (n))
+
+/* Interrupts Mask/Events */
+#define OHCI1394_reqTxComplete 0x00000001
+#define OHCI1394_respTxComplete 0x00000002
+#define OHCI1394_ARRQ 0x00000004
+#define OHCI1394_ARRS 0x00000008
+#define OHCI1394_RQPkt 0x00000010
+#define OHCI1394_RSPkt 0x00000020
+#define OHCI1394_isochTx 0x00000040
+#define OHCI1394_isochRx 0x00000080
+#define OHCI1394_postedWriteErr 0x00000100
+#define OHCI1394_lockRespErr 0x00000200
+#define OHCI1394_selfIDComplete 0x00010000
+#define OHCI1394_busReset 0x00020000
+#define OHCI1394_phy 0x00080000
+#define OHCI1394_cycleSynch 0x00100000
+#define OHCI1394_cycle64Seconds 0x00200000
+#define OHCI1394_cycleLost 0x00400000
+#define OHCI1394_cycleInconsistent 0x00800000
+#define OHCI1394_unrecoverableError 0x01000000
+#define OHCI1394_cycleTooLong 0x02000000
+#define OHCI1394_phyRegRcvd 0x04000000
+#define OHCI1394_masterIntEnable 0x80000000
+
+#define OHCI1394_evt_no_status 0x0
+#define OHCI1394_evt_long_packet 0x2
+#define OHCI1394_evt_missing_ack 0x3
+#define OHCI1394_evt_underrun 0x4
+#define OHCI1394_evt_overrun 0x5
+#define OHCI1394_evt_descriptor_read 0x6
+#define OHCI1394_evt_data_read 0x7
+#define OHCI1394_evt_data_write 0x8
+#define OHCI1394_evt_bus_reset 0x9
+#define OHCI1394_evt_timeout 0xa
+#define OHCI1394_evt_tcode_err 0xb
+#define OHCI1394_evt_reserved_b 0xc
+#define OHCI1394_evt_reserved_c 0xd
+#define OHCI1394_evt_unknown 0xe
+#define OHCI1394_evt_flushed 0xf
+
+#define OHCI1394_phy_tcode 0xe
+
+#endif /* __fw_ohci_h */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
new file mode 100644
index 00000000000..68300414e5f
--- /dev/null
+++ b/drivers/firewire/fw-sbp2.c
@@ -0,0 +1,1147 @@
+/*
+ * SBP2 driver (SCSI over IEEE1394)
+ *
+ * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * The basic structure of this driver is based on the old storage driver,
+ * drivers/ieee1394/sbp2.c, originally written by
+ * James Goodwin <jamesg@filanet.com>
+ * with later contributions and ongoing maintenance from
+ * Ben Collins <bcollins@debian.org>,
+ * Stefan Richter <stefanr@s5r6.in-berlin.de>
+ * and many others.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/timer.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+/* I don't know why the SCSI stack doesn't define something like this... */
+typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
+
+static const char sbp2_driver_name[] = "sbp2";
+
+struct sbp2_device {
+ struct kref kref;
+ struct fw_unit *unit;
+ struct fw_address_handler address_handler;
+ struct list_head orb_list;
+ u64 management_agent_address;
+ u64 command_block_agent_address;
+ u32 workarounds;
+ int login_id;
+
+ /*
+ * We cache these addresses and only update them once we've
+ * logged in or reconnected to the sbp2 device. That way, any
+ * IO to the device will automatically fail and get retried if
+ * it happens in a window where the device is not ready to
+ * handle it (e.g. after a bus reset but before we reconnect).
+ */
+ int node_id;
+ int address_high;
+ int generation;
+
+ int retries;
+ struct delayed_work work;
+};
+
+#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
+#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
+#define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */
+
+#define SBP2_ORB_NULL 0x80000000
+
+#define SBP2_DIRECTION_TO_MEDIA 0x0
+#define SBP2_DIRECTION_FROM_MEDIA 0x1
+
+/* Unit directory keys */
+#define SBP2_COMMAND_SET_SPECIFIER 0x38
+#define SBP2_COMMAND_SET 0x39
+#define SBP2_COMMAND_SET_REVISION 0x3b
+#define SBP2_FIRMWARE_REVISION 0x3c
+
+/* Flags for detected oddities and brokenness */
+#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
+#define SBP2_WORKAROUND_INQUIRY_36 0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
+#define SBP2_WORKAROUND_OVERRIDE 0x100
+
+/* Management orb opcodes */
+#define SBP2_LOGIN_REQUEST 0x0
+#define SBP2_QUERY_LOGINS_REQUEST 0x1
+#define SBP2_RECONNECT_REQUEST 0x3
+#define SBP2_SET_PASSWORD_REQUEST 0x4
+#define SBP2_LOGOUT_REQUEST 0x7
+#define SBP2_ABORT_TASK_REQUEST 0xb
+#define SBP2_ABORT_TASK_SET 0xc
+#define SBP2_LOGICAL_UNIT_RESET 0xe
+#define SBP2_TARGET_RESET_REQUEST 0xf
+
+/* Offsets for command block agent registers */
+#define SBP2_AGENT_STATE 0x00
+#define SBP2_AGENT_RESET 0x04
+#define SBP2_ORB_POINTER 0x08
+#define SBP2_DOORBELL 0x10
+#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14
+
+/* Status write response codes */
+#define SBP2_STATUS_REQUEST_COMPLETE 0x0
+#define SBP2_STATUS_TRANSPORT_FAILURE 0x1
+#define SBP2_STATUS_ILLEGAL_REQUEST 0x2
+#define SBP2_STATUS_VENDOR_DEPENDENT 0x3
+
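+/*
+ * The STATUS_GET_* macros below unpack the first quadlet of an SBP-2
+ * status block (src, resp, dead, len, sbp_status and the high 16 bits
+ * of the ORB offset) after sbp2_status_write() has byte-swapped it
+ * into host order.
+ */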
+#define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff)
+#define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff)
+#define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07)
+#define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01)
+#define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03)
+#define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03)
+#define STATUS_GET_ORB_LOW(v) ((v).orb_low)
+#define STATUS_GET_DATA(v) ((v).data)
+
+struct sbp2_status {
+ u32 status;
+ u32 orb_low;
+ u8 data[24];
+};
+
+struct sbp2_pointer {
+ u32 high;
+ u32 low;
+};
+
+struct sbp2_orb {
+ struct fw_transaction t;
+ dma_addr_t request_bus;
+ int rcode;
+ struct sbp2_pointer pointer;
+	void (*callback)(struct sbp2_orb *orb, struct sbp2_status *status);
+ struct list_head link;
+};
+
+#define MANAGEMENT_ORB_LUN(v) ((v))
+#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16)
+#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20)
+#define MANAGEMENT_ORB_EXCLUSIVE ((1) << 28)
+#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29)
+#define MANAGEMENT_ORB_NOTIFY ((1) << 31)
+
+#define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v))
+#define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16)
+
+struct sbp2_management_orb {
+ struct sbp2_orb base;
+ struct {
+ struct sbp2_pointer password;
+ struct sbp2_pointer response;
+ u32 misc;
+ u32 length;
+ struct sbp2_pointer status_fifo;
+ } request;
+ __be32 response[4];
+ dma_addr_t response_bus;
+ struct completion done;
+ struct sbp2_status status;
+};
+
+#define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff)
+#define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff)
+
+struct sbp2_login_response {
+ u32 misc;
+ struct sbp2_pointer command_block_agent;
+ u32 reconnect_hold;
+};
+#define COMMAND_ORB_DATA_SIZE(v) ((v))
+#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
+#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
+#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
+#define COMMAND_ORB_SPEED(v) ((v) << 24)
+#define COMMAND_ORB_DIRECTION(v) ((v) << 27)
+#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
+#define COMMAND_ORB_NOTIFY ((1) << 31)
+
+struct sbp2_command_orb {
+ struct sbp2_orb base;
+ struct {
+ struct sbp2_pointer next;
+ struct sbp2_pointer data_descriptor;
+ u32 misc;
+ u8 command_block[12];
+ } request;
+ struct scsi_cmnd *cmd;
+ scsi_done_fn_t done;
+ struct fw_unit *unit;
+
+ struct sbp2_pointer page_table[SG_ALL];
+ dma_addr_t page_table_bus;
+ dma_addr_t request_buffer_bus;
+};
+
+/*
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best
+ * indicator for the type of bridge chip of a device. It yields a few
+ * false positives but this did not break correctly behaving devices
+ * so far. We use ~0 as a wildcard, since the 24-bit values we get
+ * from the config ROM can never match that.
+ */
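+/*
+ * For example, with the 0xffffff00 mask applied in sbp2_probe(), any
+ * firmware revision from 0x002800 to 0x0028ff matches the DViCO entry
+ * below (the model value must still match exactly).
+ */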
+static const struct {
+ u32 firmware_revision;
+ u32 model;
+ unsigned workarounds;
+} sbp2_workarounds_table[] = {
+ /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
+ .firmware_revision = 0x002800,
+ .model = 0x001010,
+ .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
+ SBP2_WORKAROUND_MODE_SENSE_8,
+ },
+ /* Initio bridges, actually only needed for some older ones */ {
+ .firmware_revision = 0x000200,
+ .model = ~0,
+ .workarounds = SBP2_WORKAROUND_INQUIRY_36,
+ },
+ /* Symbios bridge */ {
+ .firmware_revision = 0xa0b800,
+ .model = ~0,
+ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
+ },
+
+ /*
+ * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
+ * these iPods do not feature the read_capacity bug according
+ * to one report. Read_capacity behaviour as well as model_id
+ * could change due to Apple-supplied firmware updates though.
+ */
+
+ /* iPod 4th generation. */ {
+ .firmware_revision = 0x0a2700,
+ .model = 0x000021,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod mini */ {
+ .firmware_revision = 0x0a2700,
+ .model = 0x000023,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod Photo */ {
+ .firmware_revision = 0x0a2700,
+ .model = 0x00007e,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ }
+};
+
+static void
+sbp2_status_write(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation, int speed,
+ unsigned long long offset,
+ void *payload, size_t length, void *callback_data)
+{
+ struct sbp2_device *sd = callback_data;
+ struct sbp2_orb *orb;
+ struct sbp2_status status;
+ size_t header_size;
+ unsigned long flags;
+
+ if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
+ length == 0 || length > sizeof(status)) {
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+ return;
+ }
+
+ header_size = min(length, 2 * sizeof(u32));
+ fw_memcpy_from_be32(&status, payload, header_size);
+ if (length > header_size)
+ memcpy(status.data, payload + 8, length - header_size);
+ if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
+ fw_notify("non-orb related status write, not handled\n");
+ fw_send_response(card, request, RCODE_COMPLETE);
+ return;
+ }
+
+	/* Look up the orb corresponding to this status write. */
+ spin_lock_irqsave(&card->lock, flags);
+ list_for_each_entry(orb, &sd->orb_list, link) {
+ if (STATUS_GET_ORB_HIGH(status) == 0 &&
+ STATUS_GET_ORB_LOW(status) == orb->request_bus &&
+ orb->rcode == RCODE_COMPLETE) {
+ list_del(&orb->link);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (&orb->link != &sd->orb_list)
+ orb->callback(orb, &status);
+ else
+ fw_error("status write for unknown orb\n");
+
+ fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+static void
+complete_transaction(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct sbp2_orb *orb = data;
+ unsigned long flags;
+
+ orb->rcode = rcode;
+ if (rcode != RCODE_COMPLETE) {
+ spin_lock_irqsave(&card->lock, flags);
+ list_del(&orb->link);
+ spin_unlock_irqrestore(&card->lock, flags);
+ orb->callback(orb, NULL);
+ }
+}
+
+static void
+sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
+ int node_id, int generation, u64 offset)
+{
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_device *sd = unit->device.driver_data;
+ unsigned long flags;
+
+ orb->pointer.high = 0;
+ orb->pointer.low = orb->request_bus;
+ fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
+
+ spin_lock_irqsave(&device->card->lock, flags);
+ list_add_tail(&orb->link, &sd->orb_list);
+ spin_unlock_irqrestore(&device->card->lock, flags);
+
+ fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
+ node_id, generation,
+ device->node->max_speed, offset,
+ &orb->pointer, sizeof(orb->pointer),
+ complete_transaction, orb);
+}
+
+static int sbp2_cancel_orbs(struct fw_unit *unit)
+{
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_device *sd = unit->device.driver_data;
+ struct sbp2_orb *orb, *next;
+ struct list_head list;
+ unsigned long flags;
+ int retval = -ENOENT;
+
+ INIT_LIST_HEAD(&list);
+ spin_lock_irqsave(&device->card->lock, flags);
+ list_splice_init(&sd->orb_list, &list);
+ spin_unlock_irqrestore(&device->card->lock, flags);
+
+ list_for_each_entry_safe(orb, next, &list, link) {
+ retval = 0;
+ if (fw_cancel_transaction(device->card, &orb->t) == 0)
+ continue;
+
+ orb->rcode = RCODE_CANCELLED;
+ orb->callback(orb, NULL);
+ }
+
+ return retval;
+}
+
+static void
+complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+{
+ struct sbp2_management_orb *orb =
+ (struct sbp2_management_orb *)base_orb;
+
+ if (status)
+ memcpy(&orb->status, status, sizeof(*status));
+ complete(&orb->done);
+}
+
+static int
+sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
+ int function, int lun, void *response)
+{
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_device *sd = unit->device.driver_data;
+ struct sbp2_management_orb *orb;
+ int retval = -ENOMEM;
+
+ orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+ if (orb == NULL)
+ return -ENOMEM;
+
+ /*
+ * The sbp2 device is going to send a block read request to
+ * read out the request from host memory, so map it for dma.
+ */
+ orb->base.request_bus =
+ dma_map_single(device->card->device, &orb->request,
+ sizeof(orb->request), DMA_TO_DEVICE);
+ if (dma_mapping_error(orb->base.request_bus))
+ goto out;
+
+ orb->response_bus =
+ dma_map_single(device->card->device, &orb->response,
+ sizeof(orb->response), DMA_FROM_DEVICE);
+ if (dma_mapping_error(orb->response_bus))
+ goto out;
+
+ orb->request.response.high = 0;
+ orb->request.response.low = orb->response_bus;
+
+ orb->request.misc =
+ MANAGEMENT_ORB_NOTIFY |
+ MANAGEMENT_ORB_FUNCTION(function) |
+ MANAGEMENT_ORB_LUN(lun);
+ orb->request.length =
+ MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
+
+ orb->request.status_fifo.high = sd->address_handler.offset >> 32;
+ orb->request.status_fifo.low = sd->address_handler.offset;
+
+ /*
+ * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
+ * login and 1 second reconnect time. The reconnect setting
+ * is probably fine, but the exclusive login should be an option.
+ */
+ if (function == SBP2_LOGIN_REQUEST) {
+ orb->request.misc |=
+ MANAGEMENT_ORB_EXCLUSIVE |
+ MANAGEMENT_ORB_RECONNECT(0);
+ }
+
+ fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
+
+ init_completion(&orb->done);
+ orb->base.callback = complete_management_orb;
+
+ sbp2_send_orb(&orb->base, unit,
+ node_id, generation, sd->management_agent_address);
+
+ wait_for_completion_timeout(&orb->done,
+ msecs_to_jiffies(SBP2_ORB_TIMEOUT));
+
+ retval = -EIO;
+ if (sbp2_cancel_orbs(unit) == 0) {
+ fw_error("orb reply timed out, rcode=0x%02x\n",
+ orb->base.rcode);
+ goto out;
+ }
+
+ if (orb->base.rcode != RCODE_COMPLETE) {
+ fw_error("management write failed, rcode 0x%02x\n",
+ orb->base.rcode);
+ goto out;
+ }
+
+ if (STATUS_GET_RESPONSE(orb->status) != 0 ||
+ STATUS_GET_SBP_STATUS(orb->status) != 0) {
+ fw_error("error status: %d:%d\n",
+ STATUS_GET_RESPONSE(orb->status),
+ STATUS_GET_SBP_STATUS(orb->status));
+ goto out;
+ }
+
+ retval = 0;
+ out:
+ dma_unmap_single(device->card->device, orb->base.request_bus,
+ sizeof(orb->request), DMA_TO_DEVICE);
+ dma_unmap_single(device->card->device, orb->response_bus,
+ sizeof(orb->response), DMA_FROM_DEVICE);
+
+ if (response)
+ fw_memcpy_from_be32(response,
+ orb->response, sizeof(orb->response));
+ kfree(orb);
+
+ return retval;
+}
+
+static void
+complete_agent_reset_write(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct fw_transaction *t = data;
+
+ kfree(t);
+}
+
+static int sbp2_agent_reset(struct fw_unit *unit)
+{
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_device *sd = unit->device.driver_data;
+ struct fw_transaction *t;
+ static u32 zero;
+
+ t = kzalloc(sizeof(*t), GFP_ATOMIC);
+ if (t == NULL)
+ return -ENOMEM;
+
+ fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
+ sd->node_id, sd->generation, SCODE_400,
+ sd->command_block_agent_address + SBP2_AGENT_RESET,
+ &zero, sizeof(zero), complete_agent_reset_write, t);
+
+ return 0;
+}
+
+static void sbp2_reconnect(struct work_struct *work);
+static struct scsi_host_template scsi_driver_template;
+
+static void
+release_sbp2_device(struct kref *kref)
+{
+ struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
+ struct Scsi_Host *host =
+ container_of((void *)sd, struct Scsi_Host, hostdata[0]);
+
+ sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
+ SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
+
+ scsi_remove_host(host);
+ fw_core_remove_address_handler(&sd->address_handler);
+ fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
+ put_device(&sd->unit->device);
+ scsi_host_put(host);
+}
+
+static void sbp2_login(struct work_struct *work)
+{
+ struct sbp2_device *sd =
+ container_of(work, struct sbp2_device, work.work);
+ struct Scsi_Host *host =
+ container_of((void *)sd, struct Scsi_Host, hostdata[0]);
+ struct fw_unit *unit = sd->unit;
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_login_response response;
+ int generation, node_id, local_node_id, lun, retval;
+
+ /* FIXME: Make this work for multi-lun devices. */
+ lun = 0;
+
+ generation = device->card->generation;
+ node_id = device->node->node_id;
+ local_node_id = device->card->local_node->node_id;
+
+ if (sbp2_send_management_orb(unit, node_id, generation,
+ SBP2_LOGIN_REQUEST, lun, &response) < 0) {
+ if (sd->retries++ < 5) {
+ schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
+ } else {
+ fw_error("failed to login to %s\n",
+ unit->device.bus_id);
+ kref_put(&sd->kref, release_sbp2_device);
+ }
+ return;
+ }
+
+ sd->generation = generation;
+ sd->node_id = node_id;
+ sd->address_high = local_node_id << 16;
+
+ /* Get command block agent offset and login id. */
+ sd->command_block_agent_address =
+ ((u64) (response.command_block_agent.high & 0xffff) << 32) |
+ response.command_block_agent.low;
+ sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
+
+ fw_notify("logged in to sbp2 unit %s (%d retries)\n",
+ unit->device.bus_id, sd->retries);
+ fw_notify(" - management_agent_address: 0x%012llx\n",
+ (unsigned long long) sd->management_agent_address);
+ fw_notify(" - command_block_agent_address: 0x%012llx\n",
+ (unsigned long long) sd->command_block_agent_address);
+ fw_notify(" - status write address: 0x%012llx\n",
+ (unsigned long long) sd->address_handler.offset);
+
+#if 0
+ /* FIXME: The linux1394 sbp2 does this last step. */
+ sbp2_set_busy_timeout(scsi_id);
+#endif
+
+ PREPARE_DELAYED_WORK(&sd->work, sbp2_reconnect);
+ sbp2_agent_reset(unit);
+
+ /* FIXME: Loop over luns here. */
+ lun = 0;
+ retval = scsi_add_device(host, 0, 0, lun);
+ if (retval < 0) {
+ sbp2_send_management_orb(unit, sd->node_id, sd->generation,
+ SBP2_LOGOUT_REQUEST, sd->login_id,
+ NULL);
+ /*
+ * Set this back to sbp2_login so we fall back and
+ * retry login on bus reset.
+ */
+ PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
+ }
+ kref_put(&sd->kref, release_sbp2_device);
+}
+
+static int sbp2_probe(struct device *dev)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_device *sd;
+ struct fw_csr_iterator ci;
+ struct Scsi_Host *host;
+ int i, key, value, err;
+ u32 model, firmware_revision;
+
+ err = -ENOMEM;
+ host = scsi_host_alloc(&scsi_driver_template, sizeof(*sd));
+ if (host == NULL)
+ goto fail;
+
+ sd = (struct sbp2_device *) host->hostdata;
+ unit->device.driver_data = sd;
+ sd->unit = unit;
+ INIT_LIST_HEAD(&sd->orb_list);
+ kref_init(&sd->kref);
+
+ sd->address_handler.length = 0x100;
+ sd->address_handler.address_callback = sbp2_status_write;
+ sd->address_handler.callback_data = sd;
+
+ err = fw_core_add_address_handler(&sd->address_handler,
+ &fw_high_memory_region);
+ if (err < 0)
+ goto fail_host;
+
+ err = fw_device_enable_phys_dma(device);
+ if (err < 0)
+ goto fail_address_handler;
+
+ err = scsi_add_host(host, &unit->device);
+ if (err < 0)
+ goto fail_address_handler;
+
+ /*
+ * Scan unit directory to get management agent address,
+	 * firmware revision and model. Initialize firmware_revision
+	 * and model to values that won't match anything in our table.
+ */
+ firmware_revision = 0xff000000;
+ model = 0xff000000;
+ fw_csr_iterator_init(&ci, unit->directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ switch (key) {
+ case CSR_DEPENDENT_INFO | CSR_OFFSET:
+ sd->management_agent_address =
+ 0xfffff0000000ULL + 4 * value;
+ break;
+ case SBP2_FIRMWARE_REVISION:
+ firmware_revision = value;
+ break;
+ case CSR_MODEL:
+ model = value;
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+ if (sbp2_workarounds_table[i].firmware_revision !=
+ (firmware_revision & 0xffffff00))
+ continue;
+ if (sbp2_workarounds_table[i].model != model &&
+ sbp2_workarounds_table[i].model != ~0)
+ continue;
+ sd->workarounds |= sbp2_workarounds_table[i].workarounds;
+ break;
+ }
+
+ if (sd->workarounds)
+ fw_notify("Workarounds for node %s: 0x%x "
+ "(firmware_revision 0x%06x, model_id 0x%06x)\n",
+ unit->device.bus_id,
+ sd->workarounds, firmware_revision, model);
+
+ get_device(&unit->device);
+
+ /*
+ * We schedule work to do the login so we can easily
+	 * reschedule retries. A reference is held for the work as
+	 * long as it is scheduled.
+ */
+ INIT_DELAYED_WORK(&sd->work, sbp2_login);
+ if (schedule_delayed_work(&sd->work, 0))
+ kref_get(&sd->kref);
+
+ return 0;
+
+ fail_address_handler:
+ fw_core_remove_address_handler(&sd->address_handler);
+ fail_host:
+ scsi_host_put(host);
+ fail:
+ return err;
+}
+
+static int sbp2_remove(struct device *dev)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ struct sbp2_device *sd = unit->device.driver_data;
+
+ kref_put(&sd->kref, release_sbp2_device);
+
+ return 0;
+}
+
+static void sbp2_reconnect(struct work_struct *work)
+{
+ struct sbp2_device *sd =
+ container_of(work, struct sbp2_device, work.work);
+ struct fw_unit *unit = sd->unit;
+ struct fw_device *device = fw_device(unit->device.parent);
+ int generation, node_id, local_node_id;
+
+ generation = device->card->generation;
+ node_id = device->node->node_id;
+ local_node_id = device->card->local_node->node_id;
+
+ if (sbp2_send_management_orb(unit, node_id, generation,
+ SBP2_RECONNECT_REQUEST,
+ sd->login_id, NULL) < 0) {
+ if (sd->retries++ >= 5) {
+ fw_error("failed to reconnect to %s\n",
+ unit->device.bus_id);
+ /* Fall back and try to log in again. */
+ sd->retries = 0;
+ PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
+ }
+ schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
+ return;
+ }
+
+ sd->generation = generation;
+ sd->node_id = node_id;
+ sd->address_high = local_node_id << 16;
+
+ fw_notify("reconnected to unit %s (%d retries)\n",
+ unit->device.bus_id, sd->retries);
+ sbp2_agent_reset(unit);
+ sbp2_cancel_orbs(unit);
+ kref_put(&sd->kref, release_sbp2_device);
+}
+
+static void sbp2_update(struct fw_unit *unit)
+{
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_device *sd = unit->device.driver_data;
+
+ sd->retries = 0;
+ fw_device_enable_phys_dma(device);
+ if (schedule_delayed_work(&sd->work, 0))
+ kref_get(&sd->kref);
+}
+
+#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
+#define SBP2_SW_VERSION_ENTRY 0x00010483
+
+static const struct fw_device_id sbp2_id_table[] = {
+ {
+ .match_flags = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
+ .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
+ .version = SBP2_SW_VERSION_ENTRY,
+ },
+ { }
+};
+
+static struct fw_driver sbp2_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = sbp2_driver_name,
+ .bus = &fw_bus_type,
+ .probe = sbp2_probe,
+ .remove = sbp2_remove,
+ },
+ .update = sbp2_update,
+ .id_table = sbp2_id_table,
+};
+
+static unsigned int
+sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+{
+ int sam_status;
+
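+	/*
+	 * Build fixed-format sense data (response code 0x70, current
+	 * error): byte 2 carries the sense key, byte 7 the additional
+	 * sense length, and bytes 12 and 13 the ASC/ASCQ pair, all
+	 * taken from the SBP-2 status block.
+	 */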
+ sense_data[0] = 0x70;
+ sense_data[1] = 0x0;
+ sense_data[2] = sbp2_status[1];
+ sense_data[3] = sbp2_status[4];
+ sense_data[4] = sbp2_status[5];
+ sense_data[5] = sbp2_status[6];
+ sense_data[6] = sbp2_status[7];
+ sense_data[7] = 10;
+ sense_data[8] = sbp2_status[8];
+ sense_data[9] = sbp2_status[9];
+ sense_data[10] = sbp2_status[10];
+ sense_data[11] = sbp2_status[11];
+ sense_data[12] = sbp2_status[2];
+ sense_data[13] = sbp2_status[3];
+ sense_data[14] = sbp2_status[12];
+ sense_data[15] = sbp2_status[13];
+
+ sam_status = sbp2_status[0] & 0x3f;
+
+ switch (sam_status) {
+ case SAM_STAT_GOOD:
+ case SAM_STAT_CHECK_CONDITION:
+ case SAM_STAT_CONDITION_MET:
+ case SAM_STAT_BUSY:
+ case SAM_STAT_RESERVATION_CONFLICT:
+ case SAM_STAT_COMMAND_TERMINATED:
+ return DID_OK << 16 | sam_status;
+
+ default:
+ return DID_ERROR << 16;
+ }
+}
+
+static void
+complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
+{
+ struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb;
+ struct fw_unit *unit = orb->unit;
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct scatterlist *sg;
+ int result;
+
+ if (status != NULL) {
+ if (STATUS_GET_DEAD(*status))
+ sbp2_agent_reset(unit);
+
+ switch (STATUS_GET_RESPONSE(*status)) {
+ case SBP2_STATUS_REQUEST_COMPLETE:
+ result = DID_OK << 16;
+ break;
+ case SBP2_STATUS_TRANSPORT_FAILURE:
+ result = DID_BUS_BUSY << 16;
+ break;
+ case SBP2_STATUS_ILLEGAL_REQUEST:
+ case SBP2_STATUS_VENDOR_DEPENDENT:
+ default:
+ result = DID_ERROR << 16;
+ break;
+ }
+
+ if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
+ result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
+ orb->cmd->sense_buffer);
+ } else {
+ /*
+ * If the orb completes with status == NULL, something
+ * went wrong, typically a bus reset happened mid-orb
+ * or when sending the write (less likely).
+ */
+ result = DID_BUS_BUSY << 16;
+ }
+
+ dma_unmap_single(device->card->device, orb->base.request_bus,
+ sizeof(orb->request), DMA_TO_DEVICE);
+
+ if (orb->cmd->use_sg > 0) {
+ sg = (struct scatterlist *)orb->cmd->request_buffer;
+ dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+ orb->cmd->sc_data_direction);
+ }
+
+	if (orb->page_table_bus != 0)
+		dma_unmap_single(device->card->device, orb->page_table_bus,
+				 sizeof(orb->page_table), DMA_TO_DEVICE);
+
+ if (orb->request_buffer_bus != 0)
+ dma_unmap_single(device->card->device, orb->request_buffer_bus,
+ sizeof(orb->request_buffer_bus),
+ DMA_FROM_DEVICE);
+
+ orb->cmd->result = result;
+ orb->done(orb->cmd);
+ kfree(orb);
+}
+
+static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
+{
+ struct sbp2_device *sd =
+ (struct sbp2_device *)orb->cmd->device->host->hostdata;
+ struct fw_unit *unit = sd->unit;
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct scatterlist *sg;
+ int sg_len, l, i, j, count;
+ size_t size;
+ dma_addr_t sg_addr;
+
+ sg = (struct scatterlist *)orb->cmd->request_buffer;
+ count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
+ orb->cmd->sc_data_direction);
+ if (count == 0)
+ goto fail;
+
+ /*
+ * Handle the special case where there is only one element in
+ * the scatter list by converting it to an immediate block
+ * request. This is also a workaround for broken devices such
+ * as the second generation iPod which doesn't support page
+ * tables.
+ */
+ if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
+ orb->request.data_descriptor.high = sd->address_high;
+ orb->request.data_descriptor.low = sg_dma_address(sg);
+ orb->request.misc |=
+ COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
+ return 0;
+ }
+
+ /*
+ * Convert the scatterlist to an sbp2 page table. If any
+ * scatterlist entries are too big for sbp2, we split them as we
+ * go. Even if we ask the block I/O layer to not give us sg
+ * elements larger than 65535 bytes, some IOMMUs may merge sg elements
+ * during DMA mapping, and Linux currently doesn't prevent this.
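+	 * For example, a single 0x12000 byte element is emitted as two
+	 * page table entries of 0xf000 and 0x3000 bytes.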
+ */
+ for (i = 0, j = 0; i < count; i++) {
+ sg_len = sg_dma_len(sg + i);
+ sg_addr = sg_dma_address(sg + i);
+ while (sg_len) {
+ l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
+ orb->page_table[j].low = sg_addr;
+ orb->page_table[j].high = (l << 16);
+ sg_addr += l;
+ sg_len -= l;
+ j++;
+ }
+ }
+
+ size = sizeof(orb->page_table[0]) * j;
+
+ /*
+ * The data_descriptor pointer is the one case where we need
+ * to fill in the node ID part of the address. All other
+	 * pointers assume that the referenced data resides on the
+ * initiator (i.e. us), but data_descriptor can refer to data
+ * on other nodes so we need to put our ID in descriptor.high.
+ */
+
+ orb->page_table_bus =
+ dma_map_single(device->card->device, orb->page_table,
+			       sizeof(orb->page_table), DMA_TO_DEVICE);
+ if (dma_mapping_error(orb->page_table_bus))
+ goto fail_page_table;
+ orb->request.data_descriptor.high = sd->address_high;
+ orb->request.data_descriptor.low = orb->page_table_bus;
+ orb->request.misc |=
+ COMMAND_ORB_PAGE_TABLE_PRESENT |
+ COMMAND_ORB_DATA_SIZE(j);
+
+ fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
+
+ return 0;
+
+ fail_page_table:
+ dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+ orb->cmd->sc_data_direction);
+ fail:
+ return -ENOMEM;
+}
+
+/* SCSI stack integration */
+
+static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
+{
+ struct sbp2_device *sd =
+ (struct sbp2_device *)cmd->device->host->hostdata;
+ struct fw_unit *unit = sd->unit;
+ struct fw_device *device = fw_device(unit->device.parent);
+ struct sbp2_command_orb *orb;
+
+ /*
+ * Bidirectional commands are not yet implemented, and unknown
+ * transfer direction not handled.
+ */
+ if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
+ fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
+ cmd->result = DID_ERROR << 16;
+ done(cmd);
+ return 0;
+ }
+
+ orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+ if (orb == NULL) {
+ fw_notify("failed to alloc orb\n");
+ goto fail_alloc;
+ }
+
+ /* Initialize rcode to something not RCODE_COMPLETE. */
+ orb->base.rcode = -1;
+ orb->base.request_bus =
+ dma_map_single(device->card->device, &orb->request,
+ sizeof(orb->request), DMA_TO_DEVICE);
+ if (dma_mapping_error(orb->base.request_bus))
+ goto fail_mapping;
+
+ orb->unit = unit;
+ orb->done = done;
+ orb->cmd = cmd;
+
+ orb->request.next.high = SBP2_ORB_NULL;
+ orb->request.next.low = 0x0;
+ /*
+ * At speed 100 we can do 512 bytes per packet, at speed 200,
+ * 1024 bytes per packet etc. The SBP-2 max_payload field
+ * specifies the max payload size as 2 ^ (max_payload + 2), so
+ * if we set this to max_speed + 7, we get the right value.
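+	 * For example, at SCODE_400 (max_speed == 2) this yields a
+	 * max_payload of 9, i.e. 2 ^ (9 + 2) = 2048 bytes per packet.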
+ */
+ orb->request.misc =
+ COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
+ COMMAND_ORB_SPEED(device->node->max_speed) |
+ COMMAND_ORB_NOTIFY;
+
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ orb->request.misc |=
+ COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
+ else if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ orb->request.misc |=
+ COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
+
+ if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
+ goto fail_map_payload;
+
+ fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
+
+ memset(orb->request.command_block,
+ 0, sizeof(orb->request.command_block));
+ memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
+
+ orb->base.callback = complete_command_orb;
+
+ sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
+ sd->command_block_agent_address + SBP2_ORB_POINTER);
+
+ return 0;
+
+ fail_map_payload:
+ dma_unmap_single(device->card->device, orb->base.request_bus,
+ sizeof(orb->request), DMA_TO_DEVICE);
+ fail_mapping:
+ kfree(orb);
+ fail_alloc:
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+{
+ struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
+
+ sdev->allow_restart = 1;
+
+ if (sd->workarounds & SBP2_WORKAROUND_INQUIRY_36)
+ sdev->inquiry_len = 36;
+ return 0;
+}
+
+static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+{
+ struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
+ struct fw_unit *unit = sd->unit;
+
+ sdev->use_10_for_rw = 1;
+
+ if (sdev->type == TYPE_ROM)
+ sdev->use_10_for_ms = 1;
+ if (sdev->type == TYPE_DISK &&
+ sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+ sdev->skip_ms_page_8 = 1;
+ if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) {
+ fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
+ sdev->fix_capacity = 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Called by the SCSI stack when something has really gone wrong.
+ * Usually called when a command has timed out for some reason.
+ */
+static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
+{
+ struct sbp2_device *sd =
+ (struct sbp2_device *)cmd->device->host->hostdata;
+ struct fw_unit *unit = sd->unit;
+
+ fw_notify("sbp2_scsi_abort\n");
+ sbp2_agent_reset(unit);
+ sbp2_cancel_orbs(unit);
+
+ return SUCCESS;
+}
+
+static struct scsi_host_template scsi_driver_template = {
+ .module = THIS_MODULE,
+ .name = "SBP-2 IEEE-1394",
+ .proc_name = (char *)sbp2_driver_name,
+ .queuecommand = sbp2_scsi_queuecommand,
+ .slave_alloc = sbp2_scsi_slave_alloc,
+ .slave_configure = sbp2_scsi_slave_configure,
+ .eh_abort_handler = sbp2_scsi_abort,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .use_clustering = ENABLE_CLUSTERING,
+ .cmd_per_lun = 1,
+ .can_queue = 1,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("SCSI over IEEE1394");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+#ifndef CONFIG_IEEE1394_SBP2_MODULE
+MODULE_ALIAS("sbp2");
+#endif
+
+static int __init sbp2_init(void)
+{
+ return driver_register(&sbp2_driver.driver);
+}
+
+static void __exit sbp2_cleanup(void)
+{
+ driver_unregister(&sbp2_driver.driver);
+}
+
+module_init(sbp2_init);
+module_exit(sbp2_cleanup);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
new file mode 100644
index 00000000000..7aebb8ae0ef
--- /dev/null
+++ b/drivers/firewire/fw-topology.c
@@ -0,0 +1,537 @@
+/*
+ * Incremental bus scan, based on bus topology
+ *
+ * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include "fw-transaction.h"
+#include "fw-topology.h"
+
+#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
+#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
+#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
+#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
+#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
+#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
+#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
+#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
+
+#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
+
+static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
+{
+ u32 q;
+ int port_type, shift, seq;
+
+ *total_port_count = 0;
+ *child_port_count = 0;
+
+ shift = 6;
+ q = *sid;
+ seq = 0;
+
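+	/*
+	 * The first self-ID quadlet carries three 2-bit port fields
+	 * (bits 7-6, 5-4 and 3-2); each extended self-ID quadlet
+	 * carries eight more, starting at bits 17-16.
+	 */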
+ while (1) {
+ port_type = (q >> shift) & 0x03;
+ switch (port_type) {
+ case SELFID_PORT_CHILD:
+ (*child_port_count)++;
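+			/* fall through */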
+ case SELFID_PORT_PARENT:
+ case SELFID_PORT_NCONN:
+ (*total_port_count)++;
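+			/* fall through */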
+ case SELFID_PORT_NONE:
+ break;
+ }
+
+ shift -= 2;
+ if (shift == 0) {
+ if (!SELF_ID_MORE_PACKETS(q))
+ return sid + 1;
+
+ shift = 16;
+ sid++;
+ q = *sid;
+
+ /*
+ * Check that the extra packets actually are
+ * extended self ID packets and that the
+ * sequence numbers in the extended self ID
+ * packets increase as expected.
+ */
+
+ if (!SELF_ID_EXTENDED(q) ||
+ seq != SELF_ID_EXT_SEQUENCE(q))
+ return NULL;
+
+ seq++;
+ }
+ }
+}
+
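+/*
+ * Port type fields are packed three to the base self-ID quadlet and
+ * eight to each extended quadlet, so (port_index + 5) / 8 selects the
+ * quadlet and the remainder selects the 2-bit field within it.
+ */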
+static int get_port_type(u32 *sid, int port_index)
+{
+ int index, shift;
+
+ index = (port_index + 5) / 8;
+ shift = 16 - ((port_index + 5) & 7) * 2;
+ return (sid[index] >> shift) & 0x03;
+}
+
+static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
+{
+ struct fw_node *node;
+
+ node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
+ GFP_ATOMIC);
+ if (node == NULL)
+ return NULL;
+
+ node->color = color;
+ node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
+ node->link_on = SELF_ID_LINK_ON(sid);
+ node->phy_speed = SELF_ID_PHY_SPEED(sid);
+ node->port_count = port_count;
+
+ atomic_set(&node->ref_count, 1);
+ INIT_LIST_HEAD(&node->link);
+
+ return node;
+}
+
+/*
+ * Compute the maximum hop count for this node and its children. The
+ * maximum hop count is the maximum number of connections between any
+ * two nodes in the subtree rooted at this node. We need this for
+ * setting the gap count. As we build the tree bottom up in
+ * build_tree() below, this is fairly easy to do: for each node we
+ * maintain the max hop count and the max depth, i.e. the number of
+ * hops to the furthest leaf. Computing the max hop count breaks down
+ * into two cases: either the path goes through this node, in which
+ * case the hop count is the sum of the two biggest child depths plus
+ * 2, or the max hop path is entirely contained in a child tree, in
+ * which case the max hop count is just the max hop count of this
+ * child.
+ */
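+/*
+ * For example, a node whose two deepest children have max_depth 3 and
+ * 1, and whose largest child max_hops is 4, ends up with max_depth
+ * 3 + 1 = 4 and max_hops = max(4, 3 + 1 + 2) = 6.
+ */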
+static void update_hop_count(struct fw_node *node)
+{
+ int depths[2] = { -1, -1 };
+ int max_child_hops = 0;
+ int i;
+
+ for (i = 0; i < node->port_count; i++) {
+ if (node->ports[i].node == NULL)
+ continue;
+
+ if (node->ports[i].node->max_hops > max_child_hops)
+ max_child_hops = node->ports[i].node->max_hops;
+
+ if (node->ports[i].node->max_depth > depths[0]) {
+ depths[1] = depths[0];
+ depths[0] = node->ports[i].node->max_depth;
+ } else if (node->ports[i].node->max_depth > depths[1])
+ depths[1] = node->ports[i].node->max_depth;
+ }
+
+ node->max_depth = depths[0] + 1;
+ node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
+}
+
+
+/**
+ * build_tree - Build the tree representation of the topology
+ * @card: the card the self IDs belong to
+ * @sid: array of self IDs to create the tree from
+ * @self_id_count: the length of the @sid array
+ *
+ * This function builds the tree representation of the topology given
+ * by the self IDs from the latest bus reset. During the construction
+ * of the tree, the function checks that the self IDs are valid and
+ * internally consistent. On success this function returns the
+ * fw_node corresponding to the local card, otherwise NULL.
+ */
+static struct fw_node *build_tree(struct fw_card *card,
+ u32 *sid, int self_id_count)
+{
+ struct fw_node *node, *child, *local_node, *irm_node;
+ struct list_head stack, *h;
+ u32 *next_sid, *end, q;
+ int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
+ int gap_count, topology_type;
+
+ local_node = NULL;
+ node = NULL;
+ INIT_LIST_HEAD(&stack);
+ stack_depth = 0;
+ end = sid + self_id_count;
+ phy_id = 0;
+ irm_node = NULL;
+ gap_count = SELF_ID_GAP_COUNT(*sid);
+ topology_type = 0;
+
+ while (sid < end) {
+ next_sid = count_ports(sid, &port_count, &child_port_count);
+
+ if (next_sid == NULL) {
+ fw_error("Inconsistent extended self IDs.\n");
+ return NULL;
+ }
+
+ q = *sid;
+ if (phy_id != SELF_ID_PHY_ID(q)) {
+ fw_error("PHY ID mismatch in self ID: %d != %d.\n",
+ phy_id, SELF_ID_PHY_ID(q));
+ return NULL;
+ }
+
+ if (child_port_count > stack_depth) {
+ fw_error("Topology stack underflow\n");
+ return NULL;
+ }
+
+ /*
+ * Seek back from the top of our stack to find the
+ * start of the child nodes for this node.
+ */
+ for (i = 0, h = &stack; i < child_port_count; i++)
+ h = h->prev;
+ child = fw_node(h);
+
+ node = fw_node_create(q, port_count, card->color);
+ if (node == NULL) {
+			fw_error("Out of memory while building topology.\n");
+ return NULL;
+ }
+
+ if (phy_id == (card->node_id & 0x3f))
+ local_node = node;
+
+ if (SELF_ID_CONTENDER(q))
+ irm_node = node;
+
+ if (node->phy_speed == SCODE_BETA)
+ topology_type |= FW_TOPOLOGY_B;
+ else
+ topology_type |= FW_TOPOLOGY_A;
+
+ parent_count = 0;
+
+ for (i = 0; i < port_count; i++) {
+ switch (get_port_type(sid, i)) {
+ case SELFID_PORT_PARENT:
+ /*
+				 * Who's your daddy? We don't know the
+ * parent node at this time, so we
+ * temporarily abuse node->color for
+ * remembering the entry in the
+ * node->ports array where the parent
+ * node should be. Later, when we
+ * handle the parent node, we fix up
+ * the reference.
+ */
+ parent_count++;
+ node->color = i;
+ break;
+
+ case SELFID_PORT_CHILD:
+ node->ports[i].node = child;
+ /*
+ * Fix up parent reference for this
+ * child node.
+ */
+ child->ports[child->color].node = node;
+ child->color = card->color;
+ child = fw_node(child->link.next);
+ break;
+ }
+ }
+
+ /*
+ * Check that the node reports exactly one parent
+ * port, except for the root, which of course should
+ * have no parents.
+ */
+ if ((next_sid == end && parent_count != 0) ||
+ (next_sid < end && parent_count != 1)) {
+ fw_error("Parent port inconsistency for node %d: "
+ "parent_count=%d\n", phy_id, parent_count);
+ return NULL;
+ }
+
+ /* Pop the child nodes off the stack and push the new node. */
+ __list_del(h->prev, &stack);
+ list_add_tail(&node->link, &stack);
+ stack_depth += 1 - child_port_count;
+
+ /*
+		 * If not all PHYs report the same gap count setting,
+		 * we fall back to 63, which will force a gap count
+		 * reconfiguration and a reset.
+ */
+ if (SELF_ID_GAP_COUNT(q) != gap_count)
+ gap_count = 63;
+
+ update_hop_count(node);
+
+ sid = next_sid;
+ phy_id++;
+ }
+
+ card->root_node = node;
+ card->irm_node = irm_node;
+ card->gap_count = gap_count;
+ card->topology_type = topology_type;
+
+ return local_node;
+}
+
+typedef void (*fw_node_callback_t)(struct fw_card *card,
+				   struct fw_node *node,
+				   struct fw_node *parent);
+
+static void
+for_each_fw_node(struct fw_card *card, struct fw_node *root,
+ fw_node_callback_t callback)
+{
+ struct list_head list;
+ struct fw_node *node, *next, *child, *parent;
+ int i;
+
+ INIT_LIST_HEAD(&list);
+
+ fw_node_get(root);
+ list_add_tail(&root->link, &list);
+ parent = NULL;
+ list_for_each_entry(node, &list, link) {
+ node->color = card->color;
+
+ for (i = 0; i < node->port_count; i++) {
+ child = node->ports[i].node;
+ if (!child)
+ continue;
+ if (child->color == card->color)
+ parent = child;
+ else {
+ fw_node_get(child);
+ list_add_tail(&child->link, &list);
+ }
+ }
+
+ callback(card, node, parent);
+ }
+
+ list_for_each_entry_safe(node, next, &list, link)
+ fw_node_put(node);
+}
+
+static void
+report_lost_node(struct fw_card *card,
+ struct fw_node *node, struct fw_node *parent)
+{
+ fw_node_event(card, node, FW_NODE_DESTROYED);
+ fw_node_put(node);
+}
+
+static void
+report_found_node(struct fw_card *card,
+ struct fw_node *node, struct fw_node *parent)
+{
+ int b_path = (node->phy_speed == SCODE_BETA);
+
+ if (parent != NULL) {
+ /* min() macro doesn't work here with gcc 3.4 */
+ node->max_speed = parent->max_speed < node->phy_speed ?
+ parent->max_speed : node->phy_speed;
+ node->b_path = parent->b_path && b_path;
+ } else {
+ node->max_speed = node->phy_speed;
+ node->b_path = b_path;
+ }
+
+ fw_node_event(card, node, FW_NODE_CREATED);
+}
+
+void fw_destroy_nodes(struct fw_card *card)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ card->color++;
+ if (card->local_node != NULL)
+ for_each_fw_node(card, card->local_node, report_lost_node);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
+{
+ struct fw_node *tree;
+ int i;
+
+ tree = node1->ports[port].node;
+ node0->ports[port].node = tree;
+ for (i = 0; i < tree->port_count; i++) {
+ if (tree->ports[i].node == node1) {
+ tree->ports[i].node = node0;
+ break;
+ }
+ }
+}
+
+/**
+ * update_tree - compare the old topology tree for card with the new
+ * one specified by root. Queue the nodes and mark them as either
+ * found, lost or updated. Update the nodes in the card topology tree
+ * as we go.
+ */
+static void
+update_tree(struct fw_card *card, struct fw_node *root)
+{
+ struct list_head list0, list1;
+ struct fw_node *node0, *node1;
+ int i, event;
+
+ INIT_LIST_HEAD(&list0);
+ list_add_tail(&card->local_node->link, &list0);
+ INIT_LIST_HEAD(&list1);
+ list_add_tail(&root->link, &list1);
+
+ node0 = fw_node(list0.next);
+ node1 = fw_node(list1.next);
+
+ while (&node0->link != &list0) {
+
+ /* assert(node0->port_count == node1->port_count); */
+ if (node0->link_on && !node1->link_on)
+ event = FW_NODE_LINK_OFF;
+ else if (!node0->link_on && node1->link_on)
+ event = FW_NODE_LINK_ON;
+ else
+ event = FW_NODE_UPDATED;
+
+ node0->node_id = node1->node_id;
+ node0->color = card->color;
+ node0->link_on = node1->link_on;
+ node0->initiated_reset = node1->initiated_reset;
+ node0->max_hops = node1->max_hops;
+ node1->color = card->color;
+ fw_node_event(card, node0, event);
+
+ if (card->root_node == node1)
+ card->root_node = node0;
+ if (card->irm_node == node1)
+ card->irm_node = node0;
+
+ for (i = 0; i < node0->port_count; i++) {
+ if (node0->ports[i].node && node1->ports[i].node) {
+ /*
+ * This port didn't change, queue the
+ * connected node for further
+ * investigation.
+ */
+ if (node0->ports[i].node->color == card->color)
+ continue;
+ list_add_tail(&node0->ports[i].node->link,
+ &list0);
+ list_add_tail(&node1->ports[i].node->link,
+ &list1);
+ } else if (node0->ports[i].node) {
+ /*
+ * The nodes connected here were
+ * unplugged; unref the lost nodes and
+ * queue FW_NODE_LOST callbacks for
+ * them.
+ */
+
+ for_each_fw_node(card, node0->ports[i].node,
+ report_lost_node);
+ node0->ports[i].node = NULL;
+ } else if (node1->ports[i].node) {
+ /*
+				 * One or more nodes were connected to
+ * this port. Move the new nodes into
+ * the tree and queue FW_NODE_CREATED
+ * callbacks for them.
+ */
+ move_tree(node0, node1, i);
+ for_each_fw_node(card, node0->ports[i].node,
+ report_found_node);
+ }
+ }
+
+ node0 = fw_node(node0->link.next);
+ node1 = fw_node(node1->link.next);
+ }
+}
+
+static void
+update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
+{
+ int node_count;
+
+ card->topology_map[1]++;
+ node_count = (card->root_node->node_id & 0x3f) + 1;
+ card->topology_map[2] = (node_count << 16) | self_id_count;
+ card->topology_map[0] = (self_id_count + 2) << 16;
+ memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
+ fw_compute_block_crc(card->topology_map);
+}
+
+void
+fw_core_handle_bus_reset(struct fw_card *card,
+ int node_id, int generation,
+			 int self_id_count, u32 *self_ids)
+{
+ struct fw_node *local_node;
+ unsigned long flags;
+
+ fw_flush_transactions(card);
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ /*
+	 * If the new topology has a different self_id_count, the topology
+	 * changed: either nodes were added or removed. In that case we
+	 * reset the bus manager retry counter.
+ */
+ if (card->self_id_count != self_id_count)
+ card->bm_retries = 0;
+
+ card->node_id = node_id;
+ card->generation = generation;
+ card->reset_jiffies = jiffies;
+ schedule_delayed_work(&card->work, 0);
+
+ local_node = build_tree(card, self_ids, self_id_count);
+
+ update_topology_map(card, self_ids, self_id_count);
+
+ card->color++;
+
+ if (local_node == NULL) {
+ fw_error("topology build failed\n");
+ /* FIXME: We need to issue a bus reset in this case. */
+ } else if (card->local_node == NULL) {
+ card->local_node = local_node;
+ for_each_fw_node(card, local_node, report_found_node);
+ } else {
+ update_tree(card, local_node);
+ }
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
new file mode 100644
index 00000000000..363b6cbcd0b
--- /dev/null
+++ b/drivers/firewire/fw-topology.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __fw_topology_h
+#define __fw_topology_h
+
+enum {
+ FW_TOPOLOGY_A = 0x01,
+ FW_TOPOLOGY_B = 0x02,
+ FW_TOPOLOGY_MIXED = 0x03,
+};
+
+enum {
+ FW_NODE_CREATED = 0x00,
+ FW_NODE_UPDATED = 0x01,
+ FW_NODE_DESTROYED = 0x02,
+ FW_NODE_LINK_ON = 0x03,
+ FW_NODE_LINK_OFF = 0x04,
+};
+
+struct fw_port {
+ struct fw_node *node;
+ unsigned speed : 3; /* S100, S200, ... S3200 */
+};
+
+struct fw_node {
+ u16 node_id;
+ u8 color;
+ u8 port_count;
+ unsigned link_on : 1;
+ unsigned initiated_reset : 1;
+ unsigned b_path : 1;
+ u8 phy_speed : 3; /* As in the self ID packet. */
+ u8 max_speed : 5; /* Minimum of all phy-speeds and port speeds on
+ * the path from the local node to this node. */
+ u8 max_depth : 4; /* Maximum depth to any leaf node */
+ u8 max_hops : 4; /* Max hops in this sub tree */
+ atomic_t ref_count;
+
+ /* For serializing node topology into a list. */
+ struct list_head link;
+
+ /* Upper layer specific data. */
+ void *data;
+
+ struct fw_port ports[0];
+};
+
+static inline struct fw_node *
+fw_node(struct list_head *l)
+{
+ return list_entry(l, struct fw_node, link);
+}
+
+static inline struct fw_node *
+fw_node_get(struct fw_node *node)
+{
+ atomic_inc(&node->ref_count);
+
+ return node;
+}
+
+static inline void
+fw_node_put(struct fw_node *node)
+{
+ if (atomic_dec_and_test(&node->ref_count))
+ kfree(node);
+}
+
+void
+fw_destroy_nodes(struct fw_card *card);
+
+int
+fw_compute_block_crc(u32 *block);
+
+
+#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
new file mode 100644
index 00000000000..80d0121463d
--- /dev/null
+++ b/drivers/firewire/fw-transaction.c
@@ -0,0 +1,910 @@
+/*
+ * Core IEEE1394 transaction logic
+ *
+ * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/poll.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include "fw-transaction.h"
+#include "fw-topology.h"
+#include "fw-device.h"
+
+#define HEADER_PRI(pri) ((pri) << 0)
+#define HEADER_TCODE(tcode) ((tcode) << 4)
+#define HEADER_RETRY(retry) ((retry) << 8)
+#define HEADER_TLABEL(tlabel) ((tlabel) << 10)
+#define HEADER_DESTINATION(destination) ((destination) << 16)
+#define HEADER_SOURCE(source) ((source) << 16)
+#define HEADER_RCODE(rcode) ((rcode) << 12)
+#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
+#define HEADER_DATA_LENGTH(length) ((length) << 16)
+#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0)
+
+#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
+#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
+#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
+#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
+
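+/*
+ * The PHY_* macros below build the first quadlet of a PHY
+ * configuration packet. Bit 23 flags the root ID as valid and bit 22
+ * flags the gap count as valid, so a single packet can force a new
+ * root node, a new gap count, or both.
+ */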
+#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
+#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
+#define PHY_IDENTIFIER(id) ((id) << 30)
+
+static int
+close_transaction(struct fw_transaction *transaction,
+ struct fw_card *card, int rcode,
+ u32 *payload, size_t length)
+{
+ struct fw_transaction *t;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_for_each_entry(t, &card->transaction_list, link) {
+ if (t == transaction) {
+ list_del(&t->link);
+ card->tlabel_mask &= ~(1 << t->tlabel);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (&t->link != &card->transaction_list) {
+ t->callback(card, rcode, payload, length, t->callback_data);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * Only valid for transactions that are potentially pending (ie have
+ * been sent).
+ */
+int
+fw_cancel_transaction(struct fw_card *card,
+ struct fw_transaction *transaction)
+{
+ /*
+ * Cancel the packet transmission if it's still queued. That
+ * will call the packet transmission callback which cancels
+ * the transaction.
+ */
+
+ if (card->driver->cancel_packet(card, &transaction->packet) == 0)
+ return 0;
+
+ /*
+ * If the request packet has already been sent, we need to see
+ * if the transaction is still pending and remove it in that case.
+ */
+
+ return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
+}
+EXPORT_SYMBOL(fw_cancel_transaction);
+
+static void
+transmit_complete_callback(struct fw_packet *packet,
+ struct fw_card *card, int status)
+{
+ struct fw_transaction *t =
+ container_of(packet, struct fw_transaction, packet);
+
+ switch (status) {
+ case ACK_COMPLETE:
+ close_transaction(t, card, RCODE_COMPLETE, NULL, 0);
+ break;
+ case ACK_PENDING:
+ t->timestamp = packet->timestamp;
+ break;
+ case ACK_BUSY_X:
+ case ACK_BUSY_A:
+ case ACK_BUSY_B:
+ close_transaction(t, card, RCODE_BUSY, NULL, 0);
+ break;
+ case ACK_DATA_ERROR:
+ close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0);
+ break;
+ case ACK_TYPE_ERROR:
+ close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
+ break;
+ default:
+ /*
+ * In this case the ack is really a juju specific
+ * rcode, so just forward that to the callback.
+ */
+ close_transaction(t, card, status, NULL, 0);
+ break;
+ }
+}
+
+static void
+fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+ int node_id, int source_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length)
+{
+ int ext_tcode;
+
+ if (tcode > 0x10) {
+ ext_tcode = tcode - 0x10;
+ tcode = TCODE_LOCK_REQUEST;
+ } else
+ ext_tcode = 0;
+
+ packet->header[0] =
+ HEADER_RETRY(RETRY_X) |
+ HEADER_TLABEL(tlabel) |
+ HEADER_TCODE(tcode) |
+ HEADER_DESTINATION(node_id);
+ packet->header[1] =
+ HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
+	packet->header[2] = offset;
+
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ packet->header[3] = *(u32 *)payload;
+ packet->header_length = 16;
+ packet->payload_length = 0;
+ break;
+
+ case TCODE_LOCK_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ packet->header[3] =
+ HEADER_DATA_LENGTH(length) |
+ HEADER_EXTENDED_TCODE(ext_tcode);
+ packet->header_length = 16;
+ packet->payload = payload;
+ packet->payload_length = length;
+ break;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ packet->header_length = 12;
+ packet->payload_length = 0;
+ break;
+
+ case TCODE_READ_BLOCK_REQUEST:
+ packet->header[3] =
+ HEADER_DATA_LENGTH(length) |
+ HEADER_EXTENDED_TCODE(ext_tcode);
+ packet->header_length = 16;
+ packet->payload_length = 0;
+ break;
+ }
+
+ packet->speed = speed;
+ packet->generation = generation;
+ packet->ack = 0;
+}
+
+/**
+ * This function provides low-level access to the IEEE1394 transaction
+ * logic. Most C programs would use either fw_read(), fw_write() or
+ * fw_lock() instead - those functions are convenience wrappers for
+ * this function. The fw_send_request() function is primarily
+ * provided as a flexible, one-stop entry point for language bindings
+ * and protocol bindings.
+ *
+ * FIXME: Document this function further, in particular the possible
+ * values for rcode in the callback. In short, we map ACK_COMPLETE to
+ * RCODE_COMPLETE, internal errors set errno and set rcode to
+ * RCODE_SEND_ERROR (which is out of range for standard ieee1394
+ * rcodes). All other rcodes are forwarded unchanged. For all
+ * errors, payload is NULL, length is 0.
+ *
+ * Callers must not expect the callback to be invoked before this
+ * function returns, although it may be in some cases (ACK_COMPLETE
+ * and errors).
+ *
+ * The payload is only used for write requests and must not be freed
+ * until the callback has been called.
+ *
+ * @param card the card from which to send the request
+ * @param tcode the tcode for this transaction. Do not use
+ * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
+ * etc. to specify tcode and ext_tcode.
+ * @param node_id the destination node ID (bus ID and PHY ID concatenated)
+ * @param generation the generation for which node_id is valid
+ * @param speed the speed to use for sending the request
+ * @param offset the 48 bit offset on the destination node
+ * @param payload the data payload for the request subaction
+ * @param length the length in bytes of the data to read or write
+ * @param callback function to be called when the transaction is completed
+ * @param callback_data pointer to arbitrary data, which will be
+ * passed to the callback
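+ *
+ * A rough usage sketch (illustrative only - read_done and the quadlet
+ * read of CSR_CYCLE_TIME are assumptions, not part of this API):
+ *
+ *     static void read_done(struct fw_card *card, int rcode,
+ *                           void *payload, size_t length, void *data)
+ *     {
+ *             if (rcode == RCODE_COMPLETE)
+ *                     fw_notify("quadlet: 0x%08x\n", *(u32 *) payload);
+ *     }
+ *
+ *     fw_send_request(card, &t, TCODE_READ_QUADLET_REQUEST, node_id,
+ *                     generation, SCODE_100,
+ *                     CSR_REGISTER_BASE + CSR_CYCLE_TIME,
+ *                     NULL, 4, read_done, NULL);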
+ */
+void
+fw_send_request(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int node_id, int generation, int speed,
+ unsigned long long offset,
+ void *payload, size_t length,
+ fw_transaction_callback_t callback, void *callback_data)
+{
+ unsigned long flags;
+ int tlabel, source;
+
+ /*
+ * Bump the flush timer up 100ms first of all so we
+ * don't race with a flush timer callback.
+ */
+
+ mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
+
+ /*
+ * Allocate tlabel from the bitmap and put the transaction on
+ * the list while holding the card spinlock.
+ */
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ source = card->node_id;
+ tlabel = card->current_tlabel;
+ if (card->tlabel_mask & (1 << tlabel)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
+ return;
+ }
+
+ card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
+ card->tlabel_mask |= (1 << tlabel);
+
+ list_add_tail(&t->link, &card->transaction_list);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ /* Initialize rest of transaction, fill out packet and send it. */
+ t->node_id = node_id;
+ t->tlabel = tlabel;
+ t->callback = callback;
+ t->callback_data = callback_data;
+
+ fw_fill_request(&t->packet, tcode, t->tlabel,
+ node_id, source, generation,
+ speed, offset, payload, length);
+ t->packet.callback = transmit_complete_callback;
+
+ card->driver->send_request(card, &t->packet);
+}
+EXPORT_SYMBOL(fw_send_request);
+
+static void
+transmit_phy_packet_callback(struct fw_packet *packet,
+ struct fw_card *card, int status)
+{
+ kfree(packet);
+}
+
+static void send_phy_packet(struct fw_card *card, u32 data, int generation)
+{
+ struct fw_packet *packet;
+
+ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+ if (packet == NULL)
+ return;
+
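+ /* A PHY packet is the quadlet followed by its bitwise inverse. */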
+ packet->header[0] = data;
+ packet->header[1] = ~data;
+ packet->header_length = 8;
+ packet->payload_length = 0;
+ packet->speed = SCODE_100;
+ packet->generation = generation;
+ packet->callback = transmit_phy_packet_callback;
+
+ card->driver->send_request(card, packet);
+}
+
+void fw_send_phy_config(struct fw_card *card,
+ int node_id, int generation, int gap_count)
+{
+ u32 q;
+
+ q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
+ PHY_CONFIG_ROOT_ID(node_id) |
+ PHY_CONFIG_GAP_COUNT(gap_count);
+
+ send_phy_packet(card, q, generation);
+}
+
+void fw_flush_transactions(struct fw_card *card)
+{
+ struct fw_transaction *t, *next;
+ struct list_head list;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&list);
+ spin_lock_irqsave(&card->lock, flags);
+ list_splice_init(&card->transaction_list, &list);
+ card->tlabel_mask = 0;
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ list_for_each_entry_safe(t, next, &list, link) {
+ card->driver->cancel_packet(card, &t->packet);
+
+ /*
+ * At this point cancel_packet will never call the
+ * transaction callback, since we just took all the
+ * transactions out of the list. So do it here.
+ */
+ t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+ }
+}
+
+static struct fw_address_handler *
+lookup_overlapping_address_handler(struct list_head *list,
+ unsigned long long offset, size_t length)
+{
+ struct fw_address_handler *handler;
+
+ list_for_each_entry(handler, list, link) {
+ if (handler->offset < offset + length &&
+ offset < handler->offset + handler->length)
+ return handler;
+ }
+
+ return NULL;
+}
+
+static struct fw_address_handler *
+lookup_enclosing_address_handler(struct list_head *list,
+ unsigned long long offset, size_t length)
+{
+ struct fw_address_handler *handler;
+
+ list_for_each_entry(handler, list, link) {
+ if (handler->offset <= offset &&
+ offset + length <= handler->offset + handler->length)
+ return handler;
+ }
+
+ return NULL;
+}
+
+static DEFINE_SPINLOCK(address_handler_lock);
+static LIST_HEAD(address_handler_list);
+
+const struct fw_address_region fw_low_memory_region =
+ { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
+const struct fw_address_region fw_high_memory_region =
+ { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
+const struct fw_address_region fw_private_region =
+ { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
+const struct fw_address_region fw_csr_region =
+ { .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL, };
+const struct fw_address_region fw_unit_space_region =
+ { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
+EXPORT_SYMBOL(fw_low_memory_region);
+EXPORT_SYMBOL(fw_high_memory_region);
+EXPORT_SYMBOL(fw_private_region);
+EXPORT_SYMBOL(fw_csr_region);
+EXPORT_SYMBOL(fw_unit_space_region);
+
+/**
+ * Allocate a range of addresses in the node space of the OHCI
+ * controller. When a request is received that falls within the
+ * specified address range, the specified callback is invoked. The
+ * parameters passed to the callback give the details of the
+ * particular request.
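+ *
+ * A minimal registration sketch (handle_request and the chosen length
+ * are hypothetical):
+ *
+ *     static struct fw_address_handler handler = {
+ *             .length           = 0x100,
+ *             .address_callback = handle_request,
+ *     };
+ *
+ *     if (fw_core_add_address_handler(&handler, &fw_unit_space_region) < 0)
+ *             fw_error("no free sub-range in the region\n");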
+ */
+int
+fw_core_add_address_handler(struct fw_address_handler *handler,
+ const struct fw_address_region *region)
+{
+ struct fw_address_handler *other;
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ spin_lock_irqsave(&address_handler_lock, flags);
+
+ handler->offset = region->start;
+ while (handler->offset + handler->length <= region->end) {
+ other =
+ lookup_overlapping_address_handler(&address_handler_list,
+ handler->offset,
+ handler->length);
+ if (other != NULL) {
+ handler->offset += other->length;
+ } else {
+ list_add_tail(&handler->link, &address_handler_list);
+ ret = 0;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&address_handler_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(fw_core_add_address_handler);
+
+/**
+ * Deallocate a range of addresses allocated with
+ * fw_core_add_address_handler(). This will call the associated callback
+ * one last time with the special tcode TCODE_DEALLOCATE, to let the
+ * client destroy the registered callback data. For convenience, the
+ * callback parameters offset and length are set to the start and the
+ * length respectively of the deallocated region; payload is set to NULL.
+ */
+void fw_core_remove_address_handler(struct fw_address_handler *handler)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&address_handler_lock, flags);
+ list_del(&handler->link);
+ spin_unlock_irqrestore(&address_handler_lock, flags);
+}
+EXPORT_SYMBOL(fw_core_remove_address_handler);
+
+struct fw_request {
+ struct fw_packet response;
+ u32 request_header[4];
+ int ack;
+ u32 length;
+ u32 data[0];
+};
+
+static void
+free_response_callback(struct fw_packet *packet,
+ struct fw_card *card, int status)
+{
+ struct fw_request *request;
+
+ request = container_of(packet, struct fw_request, response);
+ kfree(request);
+}
+
+void
+fw_fill_response(struct fw_packet *response, u32 *request_header,
+ int rcode, void *payload, size_t length)
+{
+ int tcode, tlabel, extended_tcode, source, destination;
+
+ tcode = HEADER_GET_TCODE(request_header[0]);
+ tlabel = HEADER_GET_TLABEL(request_header[0]);
+ source = HEADER_GET_DESTINATION(request_header[0]);
+ destination = HEADER_GET_SOURCE(request_header[1]);
+ extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
+
+ response->header[0] =
+ HEADER_RETRY(RETRY_1) |
+ HEADER_TLABEL(tlabel) |
+ HEADER_DESTINATION(destination);
+ response->header[1] =
+ HEADER_SOURCE(source) |
+ HEADER_RCODE(rcode);
+ response->header[2] = 0;
+
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ case TCODE_WRITE_BLOCK_REQUEST:
+ response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
+ response->header_length = 12;
+ response->payload_length = 0;
+ break;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ response->header[0] |=
+ HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
+ if (payload != NULL)
+ response->header[3] = *(u32 *)payload;
+ else
+ response->header[3] = 0;
+ response->header_length = 16;
+ response->payload_length = 0;
+ break;
+
+ case TCODE_READ_BLOCK_REQUEST:
+ case TCODE_LOCK_REQUEST:
+ response->header[0] |= HEADER_TCODE(tcode + 2);
+ response->header[3] =
+ HEADER_DATA_LENGTH(length) |
+ HEADER_EXTENDED_TCODE(extended_tcode);
+ response->header_length = 16;
+ response->payload = payload;
+ response->payload_length = length;
+ break;
+
+ default:
+ BUG();
+ return;
+ }
+}
+EXPORT_SYMBOL(fw_fill_response);
+
+static struct fw_request *
+allocate_request(struct fw_packet *p)
+{
+ struct fw_request *request;
+ u32 *data, length;
+ int request_tcode, t;
+
+ request_tcode = HEADER_GET_TCODE(p->header[0]);
+ switch (request_tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ data = &p->header[3];
+ length = 4;
+ break;
+
+ case TCODE_WRITE_BLOCK_REQUEST:
+ case TCODE_LOCK_REQUEST:
+ data = p->payload;
+ length = HEADER_GET_DATA_LENGTH(p->header[3]);
+ break;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ data = NULL;
+ length = 4;
+ break;
+
+ case TCODE_READ_BLOCK_REQUEST:
+ data = NULL;
+ length = HEADER_GET_DATA_LENGTH(p->header[3]);
+ break;
+
+ default:
+ BUG();
+ return NULL;
+ }
+
+ request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
+ if (request == NULL)
+ return NULL;
+
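+ /*
+ * Compute the response timestamp: advance the request timestamp
+ * by 4000 cycles (8000 cycles per second, so nominally 500ms),
+ * carrying into the seconds field when the cycle count wraps.
+ */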
+ t = (p->timestamp & 0x1fff) + 4000;
+ if (t >= 8000)
+ t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
+ else
+ t = (p->timestamp & ~0x1fff) + t;
+
+ request->response.speed = p->speed;
+ request->response.timestamp = t;
+ request->response.generation = p->generation;
+ request->response.ack = 0;
+ request->response.callback = free_response_callback;
+ request->ack = p->ack;
+ request->length = length;
+ if (data)
+ memcpy(request->data, data, length);
+
+ memcpy(request->request_header, p->header, sizeof(p->header));
+
+ return request;
+}
+
+void
+fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
+{
+ /*
+ * Broadcast packets are reported as ACK_COMPLETE, so this
+ * check is sufficient to ensure we don't send a response to
+ * broadcast packets or posted writes.
+ */
+ if (request->ack != ACK_PENDING)
+ return;
+
+ if (rcode == RCODE_COMPLETE)
+ fw_fill_response(&request->response, request->request_header,
+ rcode, request->data, request->length);
+ else
+ fw_fill_response(&request->response, request->request_header,
+ rcode, NULL, 0);
+
+ card->driver->send_response(card, &request->response);
+}
+EXPORT_SYMBOL(fw_send_response);
+
+void
+fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+{
+ struct fw_address_handler *handler;
+ struct fw_request *request;
+ unsigned long long offset;
+ unsigned long flags;
+ int tcode, destination, source;
+
+ if (p->payload_length > 2048) {
+ /* FIXME: send error response. */
+ return;
+ }
+
+ if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
+ return;
+
+ request = allocate_request(p);
+ if (request == NULL) {
+ /* FIXME: send statically allocated busy packet. */
+ return;
+ }
+
+ offset =
+ ((unsigned long long)
+ HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
+ tcode = HEADER_GET_TCODE(p->header[0]);
+ destination = HEADER_GET_DESTINATION(p->header[0]);
+ source = HEADER_GET_SOURCE(p->header[0]);
+
+ spin_lock_irqsave(&address_handler_lock, flags);
+ handler = lookup_enclosing_address_handler(&address_handler_list,
+ offset, request->length);
+ spin_unlock_irqrestore(&address_handler_lock, flags);
+
+ /*
+ * FIXME: look up the fw_node corresponding to the sender of
+ * this request and pass that to the address handler instead
+ * of the node ID. We may also want to move the address
+ * allocations to fw_node so we only do this callback if the
+ * upper layers registered it for this node.
+ */
+
+ if (handler == NULL)
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ else
+ handler->address_callback(card, request,
+ tcode, destination, source,
+ p->generation, p->speed, offset,
+ request->data, request->length,
+ handler->callback_data);
+}
+EXPORT_SYMBOL(fw_core_handle_request);
+
+void
+fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
+{
+ struct fw_transaction *t;
+ unsigned long flags;
+ u32 *data;
+ size_t data_length;
+ int tcode, tlabel, destination, source, rcode;
+
+ tcode = HEADER_GET_TCODE(p->header[0]);
+ tlabel = HEADER_GET_TLABEL(p->header[0]);
+ destination = HEADER_GET_DESTINATION(p->header[0]);
+ source = HEADER_GET_SOURCE(p->header[1]);
+ rcode = HEADER_GET_RCODE(p->header[1]);
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_for_each_entry(t, &card->transaction_list, link) {
+ if (t->node_id == source && t->tlabel == tlabel) {
+ list_del(&t->link);
+ card->tlabel_mask &= ~(1 << t->tlabel);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ if (&t->link == &card->transaction_list) {
+ fw_notify("Unsolicited response (source %x, tlabel %x)\n",
+ source, tlabel);
+ return;
+ }
+
+ /*
+ * FIXME: sanity-check the packet - is the length correct, do the
+ * tcode and addresses match?
+ */
+
+ switch (tcode) {
+ case TCODE_READ_QUADLET_RESPONSE:
+ data = (u32 *) &p->header[3];
+ data_length = 4;
+ break;
+
+ case TCODE_WRITE_RESPONSE:
+ data = NULL;
+ data_length = 0;
+ break;
+
+ case TCODE_READ_BLOCK_RESPONSE:
+ case TCODE_LOCK_RESPONSE:
+ data = p->payload;
+ data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
+ break;
+
+ default:
+ /* Should never happen, this is just to shut up gcc. */
+ data = NULL;
+ data_length = 0;
+ break;
+ }
+
+ t->callback(card, rcode, data, data_length, t->callback_data);
+}
+EXPORT_SYMBOL(fw_core_handle_response);
+
+const struct fw_address_region topology_map_region =
+ { .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
+
+static void
+handle_topology_map(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation, int speed,
+ unsigned long long offset,
+ void *payload, size_t length, void *callback_data)
+{
+ int i, start, end;
+ u32 *map;
+
+ if (!TCODE_IS_READ_REQUEST(tcode)) {
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+ return;
+ }
+
+ if ((offset & 3) > 0 || (length & 3) > 0) {
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+
+ start = (offset - topology_map_region.start) / 4;
+ end = start + length / 4;
+ map = payload;
+
+ for (i = 0; i < length / 4; i++)
+ map[i] = cpu_to_be32(card->topology_map[start + i]);
+
+ fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+static struct fw_address_handler topology_map = {
+ .length = 0x200,
+ .address_callback = handle_topology_map,
+};
+
+const struct fw_address_region registers_region =
+ { .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
+
+static void
+handle_registers(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation, int speed,
+ unsigned long long offset,
+ void *payload, size_t length, void *callback_data)
+{
+ int reg = offset - CSR_REGISTER_BASE;
+ unsigned long long bus_time;
+ __be32 *data = payload;
+
+ switch (reg) {
+ case CSR_CYCLE_TIME:
+ case CSR_BUS_TIME:
+ if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+ break;
+ }
+
+ bus_time = card->driver->get_bus_time(card);
+ if (reg == CSR_CYCLE_TIME)
+ *data = cpu_to_be32(bus_time);
+ else
+ *data = cpu_to_be32(bus_time >> 25);
+ fw_send_response(card, request, RCODE_COMPLETE);
+ break;
+
+ case CSR_BUS_MANAGER_ID:
+ case CSR_BANDWIDTH_AVAILABLE:
+ case CSR_CHANNELS_AVAILABLE_HI:
+ case CSR_CHANNELS_AVAILABLE_LO:
+ /*
+ * FIXME: these are handled by the OHCI hardware and
+ * the stack never sees these requests. If we add
+ * support for a new type of controller that doesn't
+ * handle this in hardware, we need to deal with these
+ * transactions.
+ */
+ BUG();
+ break;
+
+ case CSR_BUSY_TIMEOUT:
+ /* FIXME: Implement this. */
+ default:
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ break;
+ }
+}
+
+static struct fw_address_handler registers = {
+ .length = 0x400,
+ .address_callback = handle_registers,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
+MODULE_LICENSE("GPL");
+
+static const u32 vendor_textual_descriptor[] = {
+ /* textual descriptor leaf () */
+ 0x00060000,
+ 0x00000000,
+ 0x00000000,
+ 0x4c696e75, /* L i n u */
+ 0x78204669, /* x F i */
+ 0x72657769, /* r e w i */
+ 0x72650000, /* r e */
+};
+
+static const u32 model_textual_descriptor[] = {
+ /* model descriptor leaf () */
+ 0x00030000,
+ 0x00000000,
+ 0x00000000,
+ 0x4a756a75, /* J u j u */
+};
+
+static struct fw_descriptor vendor_id_descriptor = {
+ .length = ARRAY_SIZE(vendor_textual_descriptor),
+ .immediate = 0x03d00d1e,
+ .key = 0x81000000,
+ .data = vendor_textual_descriptor,
+};
+
+static struct fw_descriptor model_id_descriptor = {
+ .length = ARRAY_SIZE(model_textual_descriptor),
+ .immediate = 0x17000001,
+ .key = 0x81000000,
+ .data = model_textual_descriptor,
+};
+
+static int __init fw_core_init(void)
+{
+ int retval;
+
+ retval = bus_register(&fw_bus_type);
+ if (retval < 0)
+ return retval;
+
+ fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
+ if (fw_cdev_major < 0) {
+ bus_unregister(&fw_bus_type);
+ return fw_cdev_major;
+ }
+
+ retval = fw_core_add_address_handler(&topology_map,
+ &topology_map_region);
+ BUG_ON(retval < 0);
+
+ retval = fw_core_add_address_handler(&registers,
+ &registers_region);
+ BUG_ON(retval < 0);
+
+ /* Add the vendor textual descriptor. */
+ retval = fw_core_add_descriptor(&vendor_id_descriptor);
+ BUG_ON(retval < 0);
+ retval = fw_core_add_descriptor(&model_id_descriptor);
+ BUG_ON(retval < 0);
+
+ return 0;
+}
+
+static void __exit fw_core_cleanup(void)
+{
+ unregister_chrdev(fw_cdev_major, "firewire");
+ bus_unregister(&fw_bus_type);
+}
+
+module_init(fw_core_init);
+module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
new file mode 100644
index 00000000000..acdc3be38c6
--- /dev/null
+++ b/drivers/firewire/fw-transaction.h
@@ -0,0 +1,458 @@
+/*
+ * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __fw_transaction_h
+#define __fw_transaction_h
+
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/firewire-constants.h>
+
+#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
+#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
+#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
+#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
+#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
+#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
+
+#define LOCAL_BUS 0xffc0
+
+#define SELFID_PORT_CHILD 0x3
+#define SELFID_PORT_PARENT 0x2
+#define SELFID_PORT_NCONN 0x1
+#define SELFID_PORT_NONE 0x0
+
+#define PHY_PACKET_CONFIG 0x0
+#define PHY_PACKET_LINK_ON 0x1
+#define PHY_PACKET_SELF_ID 0x2
+
+/* Bit fields _within_ the PHY registers. */
+#define PHY_LINK_ACTIVE 0x80
+#define PHY_CONTENDER 0x40
+#define PHY_BUS_RESET 0x40
+#define PHY_BUS_SHORT_RESET 0x40
+
+#define CSR_REGISTER_BASE 0xfffff0000000ULL
+
+/* register offsets relative to CSR_REGISTER_BASE */
+#define CSR_STATE_CLEAR 0x0
+#define CSR_STATE_SET 0x4
+#define CSR_NODE_IDS 0x8
+#define CSR_RESET_START 0xc
+#define CSR_SPLIT_TIMEOUT_HI 0x18
+#define CSR_SPLIT_TIMEOUT_LO 0x1c
+#define CSR_CYCLE_TIME 0x200
+#define CSR_BUS_TIME 0x204
+#define CSR_BUSY_TIMEOUT 0x210
+#define CSR_BUS_MANAGER_ID 0x21c
+#define CSR_BANDWIDTH_AVAILABLE 0x220
+#define CSR_CHANNELS_AVAILABLE 0x224
+#define CSR_CHANNELS_AVAILABLE_HI 0x224
+#define CSR_CHANNELS_AVAILABLE_LO 0x228
+#define CSR_BROADCAST_CHANNEL 0x234
+#define CSR_CONFIG_ROM 0x400
+#define CSR_CONFIG_ROM_END 0x800
+#define CSR_FCP_COMMAND 0xB00
+#define CSR_FCP_RESPONSE 0xD00
+#define CSR_FCP_END 0xF00
+#define CSR_TOPOLOGY_MAP 0x1000
+#define CSR_TOPOLOGY_MAP_END 0x1400
+#define CSR_SPEED_MAP 0x2000
+#define CSR_SPEED_MAP_END 0x3000
+
+#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
+#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
+#define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args)
+
+static inline void
+fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
+{
+ u32 *dst = _dst;
+ u32 *src = _src;
+ int i;
+
+ for (i = 0; i < size / 4; i++)
+ dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline void
+fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
+{
+ fw_memcpy_from_be32(_dst, _src, size);
+}
+
+struct fw_card;
+struct fw_packet;
+struct fw_node;
+struct fw_request;
+
+struct fw_descriptor {
+ struct list_head link;
+ size_t length;
+ u32 immediate;
+ u32 key;
+ const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
+ struct fw_card *card, int status);
+
+typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
+ void *data,
+ size_t length,
+ void *callback_data);
+
+typedef void (*fw_address_callback_t)(struct fw_card *card,
+ struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation, int speed,
+ unsigned long long offset,
+ void *data, size_t length,
+ void *callback_data);
+
+typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
+ int node_id, int generation,
+ u32 *self_ids,
+ int self_id_count,
+ void *callback_data);
+
+struct fw_packet {
+ int speed;
+ int generation;
+ u32 header[4];
+ size_t header_length;
+ void *payload;
+ size_t payload_length;
+ u32 timestamp;
+
+ /*
+ * This callback is called when the packet transmission has
+ * completed; for successful transmission, the status code is
+ * the ack received from the destination, otherwise it's a
+ * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
+ * The callback can be called from tasklet context and thus
+ * must never block.
+ */
+ fw_packet_callback_t callback;
+ int ack;
+ struct list_head link;
+ void *driver_data;
+};
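+ /*
+ * If the loop found the transaction, t still points at it;
+ * otherwise the iterator ran off the end and &t->link equals
+ * the list head, which the test below detects.
+ */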
+
+struct fw_transaction {
+ int node_id; /* The generation is implied; it is always the current one. */
+ int tlabel;
+ int timestamp;
+ struct list_head link;
+
+ struct fw_packet packet;
+
+ /*
+ * The data passed to the callback is valid only during the
+ * callback.
+ */
+ fw_transaction_callback_t callback;
+ void *callback_data;
+};
+
+static inline struct fw_packet *
+fw_packet(struct list_head *l)
+{
+ return list_entry(l, struct fw_packet, link);
+}
+
+struct fw_address_handler {
+ u64 offset;
+ size_t length;
+ fw_address_callback_t address_callback;
+ void *callback_data;
+ struct list_head link;
+};
+
+
+struct fw_address_region {
+ u64 start;
+ u64 end;
+};
+
+extern const struct fw_address_region fw_low_memory_region;
+extern const struct fw_address_region fw_high_memory_region;
+extern const struct fw_address_region fw_private_region;
+extern const struct fw_address_region fw_csr_region;
+extern const struct fw_address_region fw_unit_space_region;
+
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+ const struct fw_address_region *region);
+void fw_core_remove_address_handler(struct fw_address_handler *handler);
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+ int rcode, void *payload, size_t length);
+void fw_send_response(struct fw_card *card,
+ struct fw_request *request, int rcode);
+
+extern struct bus_type fw_bus_type;
+
+struct fw_card {
+ const struct fw_card_driver *driver;
+ struct device *device;
+ struct kref kref;
+
+ int node_id;
+ int generation;
+ /* This is the generation used for timestamping incoming requests. */
+ int request_generation;
+ int current_tlabel, tlabel_mask;
+ struct list_head transaction_list;
+ struct timer_list flush_timer;
+ unsigned long reset_jiffies;
+
+ unsigned long long guid;
+ int max_receive;
+ int link_speed;
+ int config_rom_generation;
+
+ /*
+ * We need to store up to 4 self IDs for each of a maximum of 63
+ * devices, plus 3 words for the topology map header.
+ */
+ int self_id_count;
+ u32 topology_map[252 + 3];
+
+ spinlock_t lock; /* Take this lock when handling the lists in
+ * this struct. */
+ struct fw_node *local_node;
+ struct fw_node *root_node;
+ struct fw_node *irm_node;
+ int color;
+ int gap_count;
+ int topology_type;
+
+ int index;
+
+ struct list_head link;
+
+ /* Work struct for bus manager (BM) duties. */
+ struct delayed_work work;
+ int bm_retries;
+ int bm_generation;
+};
+
+struct fw_card *fw_card_get(struct fw_card *card);
+void fw_card_put(struct fw_card *card);
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointed to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frames automatically).
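+ *
+ * For instance (an illustrative sketch only), a transmit packet with an
+ * 8-byte immediate header and a 256-byte indirect payload could be
+ * described as:
+ *
+ *     struct {
+ *             struct fw_iso_packet packet;
+ *             u32 header[2];
+ *     } p = {
+ *             .packet = { .payload_length = 256, .interrupt = 1,
+ *                         .header_length = 8 },
+ *             .header = { 0x12345678, 0x9abcdef0 },
+ *     };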
+ */
+
+struct fw_iso_packet {
+ u16 payload_length; /* Length of indirect payload. */
+ u32 interrupt : 1; /* Generate interrupt on this packet */
+ u32 skip : 1; /* Set to not send packet at all. */
+ u32 tag : 2;
+ u32 sy : 4;
+ u32 header_length : 8; /* Length of immediate header. */
+ u32 header[0];
+};
+
+#define FW_ISO_CONTEXT_TRANSMIT 0
+#define FW_ISO_CONTEXT_RECEIVE 1
+
+#define FW_ISO_CONTEXT_MATCH_TAG0 1
+#define FW_ISO_CONTEXT_MATCH_TAG1 2
+#define FW_ISO_CONTEXT_MATCH_TAG2 4
+#define FW_ISO_CONTEXT_MATCH_TAG3 8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
+
+struct fw_iso_context;
+
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+ u32 cycle,
+ size_t header_length,
+ void *header,
+ void *data);
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction. Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space. We store the
+ * DMA address in each page's private field. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+
+struct fw_iso_buffer {
+ enum dma_data_direction direction;
+ struct page **pages;
+ int page_count;
+};
+
+struct fw_iso_context {
+ struct fw_card *card;
+ int type;
+ int channel;
+ int speed;
+ size_t header_size;
+ fw_iso_callback_t callback;
+ void *callback_data;
+};
+
+int
+fw_iso_buffer_init(struct fw_iso_buffer *buffer,
+ struct fw_card *card,
+ int page_count,
+ enum dma_data_direction direction);
+int
+fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void
+fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context *
+fw_iso_context_create(struct fw_card *card, int type,
+ int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data);
+
+void
+fw_iso_context_destroy(struct fw_iso_context *ctx);
+
+int
+fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+
+int
+fw_iso_context_start(struct fw_iso_context *ctx,
+ int cycle, int sync, int tags);
+
+int
+fw_iso_context_stop(struct fw_iso_context *ctx);
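+
+/*
+ * Typical context setup (an illustrative sketch only; error handling is
+ * omitted and the channel and page-count values are arbitrary):
+ *
+ *     ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_TRANSMIT, 3,
+ *                                 SCODE_400, 8, callback, NULL);
+ *     fw_iso_buffer_init(&buffer, card, 16, DMA_TO_DEVICE);
+ *     fw_iso_context_queue(ctx, &packet, &buffer, 0);
+ *     fw_iso_context_start(ctx, -1, 0, 0);
+ */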
+
+struct fw_card_driver {
+ const char *name;
+
+ /*
+ * Enable the given card with the given initial config rom.
+ * This function is expected to activate the card, and either
+ * enable the PHY or set the link_on bit and initiate a bus
+ * reset.
+ */
+ int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
+
+ int (*update_phy_reg)(struct fw_card *card, int address,
+ int clear_bits, int set_bits);
+
+ /*
+ * Update the config rom for an enabled card. This function
+ * should change the config rom that is presented on the bus
+ * and initiate a bus reset.
+ */
+ int (*set_config_rom)(struct fw_card *card,
+ u32 *config_rom, size_t length);
+
+ void (*send_request)(struct fw_card *card, struct fw_packet *packet);
+ void (*send_response)(struct fw_card *card, struct fw_packet *packet);
+ /* Calling cancel is valid once a packet has been submitted. */
+ int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
+
+ /*
+ * Allow the specified node ID to do direct DMA out and in of
+ * host memory. The card will disable this for all nodes when
+ * a bus reset happens, so drivers need to re-enable it after
+ * a bus reset. Returns 0 on success, -ENODEV if the card
+ * doesn't support this, -ESTALE if the generation doesn't
+ * match.
+ */
+ int (*enable_phys_dma)(struct fw_card *card,
+ int node_id, int generation);
+
+ u64 (*get_bus_time)(struct fw_card *card);
+
+ struct fw_iso_context *
+ (*allocate_iso_context)(struct fw_card *card,
+ int type, size_t header_size);
+ void (*free_iso_context)(struct fw_iso_context *ctx);
+
+ int (*start_iso)(struct fw_iso_context *ctx,
+ s32 cycle, u32 sync, u32 tags);
+
+ int (*queue_iso)(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+
+ int (*stop_iso)(struct fw_iso_context *ctx);
+};
+
+int
+fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
+
+void
+fw_send_request(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int node_id, int generation, int speed,
+ unsigned long long offset,
+ void *data, size_t length,
+ fw_transaction_callback_t callback, void *callback_data);
+
+int fw_cancel_transaction(struct fw_card *card,
+ struct fw_transaction *transaction);
+
+void fw_flush_transactions(struct fw_card *card);
+
+void fw_send_phy_config(struct fw_card *card,
+ int node_id, int generation, int gap_count);
+
+/*
+ * Called by the topology code to inform the device code of node
+ * activity: nodes found, lost, or updated.
+ */
+void
+fw_node_event(struct fw_card *card, struct fw_node *node, int event);
+
+/* API used by card level drivers */
+
+void
+fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
+ struct device *device);
+int
+fw_card_add(struct fw_card *card,
+ u32 max_receive, u32 link_speed, u64 guid);
+
+void
+fw_core_remove_card(struct fw_card *card);
+
+void
+fw_core_handle_bus_reset(struct fw_card *card,
+ int node_id, int generation,
+ int self_id_count, u32 *self_ids);
+void
+fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
+
+void
+fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+
+#endif /* __fw_transaction_h */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 3ba3a5221c4..4d1cb5b855d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -4,6 +4,7 @@
menuconfig HWMON
tristate "Hardware Monitoring support"
+ depends on HAS_IOMEM
default y
help
Hardware monitoring devices let you monitor the hardware health
diff --git a/drivers/hwmon/ams/ams-input.c b/drivers/hwmon/ams/ams-input.c
index 18210164e30..ca7095d96ad 100644
--- a/drivers/hwmon/ams/ams-input.c
+++ b/drivers/hwmon/ams/ams-input.c
@@ -87,7 +87,7 @@ static void ams_input_enable(void)
ams_info.idev->id.vendor = 0;
ams_info.idev->open = ams_input_open;
ams_info.idev->close = ams_input_close;
- ams_info.idev->cdev.dev = &ams_info.of_dev->dev;
+ ams_info.idev->dev.parent = &ams_info.of_dev->dev;
input_set_abs_params(ams_info.idev, ABS_X, -50, 50, 3, 0);
input_set_abs_params(ams_info.idev, ABS_Y, -50, 50, 3, 0);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index b51c104a28a..0c160675b3a 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -1100,7 +1100,7 @@ static int applesmc_create_accelerometer(void)
/* initialize the input class */
applesmc_idev->name = "applesmc";
applesmc_idev->id.bustype = BUS_HOST;
- applesmc_idev->cdev.dev = &pdev->dev;
+ applesmc_idev->dev.parent = &pdev->dev;
applesmc_idev->evbit[0] = BIT(EV_ABS);
applesmc_idev->open = applesmc_idev_open;
applesmc_idev->close = applesmc_idev_close;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 03b1f650d1c..75e3911810a 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -309,9 +309,11 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
coretemp_device_add(cpu);
break;
case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
coretemp_device_remove(cpu);
break;
}
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index f82fa2d23f9..e0cf5e6fe5b 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -574,7 +574,7 @@ static int __init hdaps_init(void)
/* initialize the input class */
hdaps_idev->name = "hdaps";
- hdaps_idev->cdev.dev = &pdev->dev;
+ hdaps_idev->dev.parent = &pdev->dev;
hdaps_idev->evbit[0] = BIT(EV_ABS);
input_set_abs_params(hdaps_idev, ABS_X,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 434a61b415a..96867347bcb 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -4,6 +4,7 @@
menuconfig I2C
tristate "I2C support"
+ depends on HAS_IOMEM
---help---
I2C (pronounce: I-square-C) is a slow serial bus protocol used in
many micro controller applications and developed by Philips. SMBus,
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index f35156c5892..9c8b6d5eaec 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
@@ -226,13 +227,14 @@ static int __devinit at91_i2c_probe(struct platform_device *pdev)
adapter->algo = &at91_algorithm;
adapter->class = I2C_CLASS_HWMON;
adapter->dev.parent = &pdev->dev;
+ /* adapter->id == 0 ... only one TWI controller for now */
platform_set_drvdata(pdev, adapter);
clk_enable(twi_clk); /* enable peripheral clock */
at91_twi_hwinit(); /* initialize TWI controller */
- rc = i2c_add_adapter(adapter);
+ rc = i2c_add_numbered_adapter(adapter);
if (rc) {
dev_err(&pdev->dev, "Adapter %s registration failed\n",
adapter->name);
@@ -295,6 +297,9 @@ static int at91_i2c_resume(struct platform_device *pdev)
#define at91_i2c_resume NULL
#endif
+/* work with "modprobe at91_i2c" from hotplugging or coldplugging */
+MODULE_ALIAS("at91_i2c");
+
static struct platform_driver at91_i2c_driver = {
.probe = at91_i2c_probe,
.remove = __devexit_p(at91_i2c_remove),
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 873544ab598..8a0a99b9364 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -548,7 +548,7 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c)
*/
icr = readl(_ICR(i2c));
icr &= ~(ICR_STOP | ICR_ACKNAK);
- writel(icr, _IRC(i2c));
+ writel(icr, _ICR(i2c));
}
/*
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 7ed92dc3d83..3c3f2ebf3fc 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -354,7 +354,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
* also needs to get error handling and probably
* an #ifdef CONFIG_SOFTWARE_SUSPEND
*/
- pm_suspend(PM_SUSPEND_DISK);
+ hibernate();
#endif
poll = 1;
}
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 5bdf64b7791..9040809d2c2 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -7,6 +7,7 @@
if BLOCK
menu "ATA/ATAPI/MFM/RLL support"
+ depends on HAS_IOMEM
config IDE
tristate "ATA/ATAPI/MFM/RLL support"
@@ -291,6 +292,17 @@ config IDE_TASK_IOCTL
If you are unsure, say N here.
+config IDE_PROC_FS
+ bool "legacy /proc/ide/ support"
+ depends on IDE && PROC_FS
+ default y
+ help
+ This option enables support for the various files in
+ /proc/ide. In Linux 2.6 this has been superseded by
+ files in sysfs but many legacy applications rely on this.
+
+ If unsure say Y.
+
comment "IDE chipset support/bugfixes"
config IDE_GENERIC
@@ -360,6 +372,9 @@ config IDEPCI_SHARE_IRQ
It is safe to say Y to this question, in most cases.
If unsure, say N.
+config IDEPCI_PCIBUS_ORDER
+ def_bool PCI && BLK_DEV_IDE=y && BLK_DEV_IDEPCI
+
config BLK_DEV_OFFBOARD
bool "Boot off-board chipsets first support"
depends on PCI && BLK_DEV_IDEPCI
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index d9f029e8ff7..75dc6969e0a 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -20,7 +20,7 @@ ide-core-$(CONFIG_BLK_DEV_CMD640) += pci/cmd640.o
# Core IDE code - must come before legacy
ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
-ide-core-$(CONFIG_PROC_FS) += ide-proc.o
+ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o
ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index 9d474e5fd8d..f7449d04114 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -45,7 +45,7 @@ bastide_register(unsigned int base, unsigned int aux, int irq,
hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
hw.irq = irq;
- ide_register_hw(&hw, hwif);
+ ide_register_hw(&hw, 0, hwif);
return 0;
}
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index e2953fc1faf..1fe0457243d 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -342,7 +342,7 @@ static int icside_dma_check(ide_drive_t *drive)
* Enable DMA on any drive that has multiword DMA
*/
if (id->field_valid & 2) {
- xfer_mode = ide_dma_speed(drive, 0);
+ xfer_mode = ide_max_dma_mode(drive);
goto out;
}
@@ -591,7 +591,8 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
state->hwif[0] = hwif;
probe_hwif_init(hwif);
- create_proc_ide_interfaces();
+
+ ide_proc_register_port(hwif);
return 0;
}
@@ -679,7 +680,9 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
probe_hwif_init(hwif);
probe_hwif_init(mate);
- create_proc_ide_interfaces();
+
+ ide_proc_register_port(hwif);
+ ide_proc_register_port(mate);
return 0;
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index 23488c4d1fc..a3d6744e870 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -38,6 +38,6 @@ void __init ide_arm_init(void)
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206);
hw.irq = IDE_ARM_IRQ;
- ide_register_hw(&hw, NULL);
+ ide_register_hw(&hw, 1, NULL);
}
}
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index 9c6c49fdd2b..890ea3fac3c 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -76,7 +76,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
hwif->gendev.parent = &ec->dev;
hwif->noprobe = 0;
probe_hwif_init(hwif);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
ecard_set_drvdata(ec, hwif);
goto out;
}
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 5e8efc89255..c04cb25a01f 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -796,7 +796,7 @@ init_e100_ide (void)
ide_offsets,
0, 0, cris_ide_ack_intr,
ide_default_irq(0));
- ide_register_hw(&hw, &hwif);
+ ide_register_hw(&hw, 1, &hwif);
hwif->mmio = 1;
hwif->chipset = ide_etrax100;
hwif->tuneproc = &tune_cris_ide;
@@ -1004,7 +1004,7 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
static int cris_config_drive_for_dma (ide_drive_t *drive)
{
- u8 speed = ide_dma_speed(drive, 1);
+ u8 speed = ide_max_dma_mode(drive);
if (!speed)
return 0;
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 88750a30033..6d26ad7360d 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -101,7 +101,7 @@ void __init h8300_ide_init(void)
hw_setup(&hw);
/* register if */
- idx = ide_register_hw(&hw, &hwif);
+ idx = ide_register_hw(&hw, 1, &hwif);
if (idx == -1) {
printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
return;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 638becda81c..252ab8295ed 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -3059,10 +3059,14 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
return nslots;
}
+#ifdef CONFIG_IDE_PROC_FS
static void ide_cdrom_add_settings(ide_drive_t *drive)
{
- ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
+ ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
}
+#else
+static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
+#endif
/*
* standard prep_rq_fn that builds 10 byte cmds
@@ -3274,7 +3278,7 @@ int ide_cdrom_setup (ide_drive_t *drive)
return 0;
}
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IDE_PROC_FS
static
sector_t ide_cdrom_capacity (ide_drive_t *drive)
{
@@ -3291,7 +3295,7 @@ static void ide_cd_remove(ide_drive_t *drive)
{
struct cdrom_info *info = drive->driver_data;
- ide_unregister_subdriver(drive, info->driver);
+ ide_proc_unregister_driver(drive, info->driver);
del_gendisk(info->disk);
@@ -3321,7 +3325,7 @@ static void ide_cd_release(struct kref *kref)
static int ide_cd_probe(ide_drive_t *);
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IDE_PROC_FS
static int proc_idecd_read_capacity
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
@@ -3336,8 +3340,6 @@ static ide_proc_entry_t idecd_proc[] = {
{ "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
{ NULL, 0, NULL, NULL }
};
-#else
-# define idecd_proc NULL
#endif
static ide_driver_t ide_cdrom_driver = {
@@ -3355,7 +3357,9 @@ static ide_driver_t ide_cdrom_driver = {
.end_request = ide_end_request,
.error = __ide_error,
.abort = __ide_abort,
+#ifdef CONFIG_IDE_PROC_FS
.proc = idecd_proc,
+#endif
};
static int idecd_open(struct inode * inode, struct file * file)
@@ -3517,7 +3521,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_init_disk(g, drive);
- ide_register_subdriver(drive, &ide_cdrom_driver);
+ ide_proc_register_driver(drive, &ide_cdrom_driver);
kref_init(&info->kref);
@@ -3534,7 +3538,7 @@ static int ide_cd_probe(ide_drive_t *drive)
g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
if (ide_cdrom_setup(drive)) {
struct cdrom_device_info *devinfo = &info->devinfo;
- ide_unregister_subdriver(drive, &ide_cdrom_driver);
+ ide_proc_unregister_driver(drive, &ide_cdrom_driver);
kfree(info->buffer);
kfree(info->toc);
kfree(info->changer_info);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 37aa6ddd970..7fff773f2df 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -559,8 +559,7 @@ static sector_t idedisk_capacity (ide_drive_t *drive)
return drive->capacity64 - drive->sect0;
}
-#ifdef CONFIG_PROC_FS
-
+#ifdef CONFIG_IDE_PROC_FS
static int smart_enable(ide_drive_t *drive)
{
ide_task_t args;
@@ -678,12 +677,7 @@ static ide_proc_entry_t idedisk_proc[] = {
{ "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL },
{ NULL, 0, NULL, NULL }
};
-
-#else
-
-#define idedisk_proc NULL
-
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_IDE_PROC_FS */
static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
{
@@ -737,6 +731,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
{
struct request rq;
+ if (arg < 0 || arg > drive->id->max_multsect)
+ return -EINVAL;
+
if (drive->special.b.set_multmode)
return -EBUSY;
ide_init_drive_cmd (&rq);
@@ -749,6 +746,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
static int set_nowerr(ide_drive_t *drive, int arg)
{
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
if (ide_spin_wait_hwgroup(drive))
return -EBUSY;
drive->nowerr = arg;
@@ -800,6 +800,9 @@ static int write_cache(ide_drive_t *drive, int arg)
ide_task_t args;
int err = 1;
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
if (ide_id_has_flush_cache(drive->id)) {
memset(&args, 0, sizeof(ide_task_t));
args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
@@ -835,6 +838,9 @@ static int set_acoustic (ide_drive_t *drive, int arg)
{
ide_task_t args;
+ if (arg < 0 || arg > 254)
+ return -EINVAL;
+
memset(&args, 0, sizeof(ide_task_t));
args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? SETFEATURES_EN_AAM :
SETFEATURES_DIS_AAM;
@@ -855,6 +861,9 @@ static int set_acoustic (ide_drive_t *drive, int arg)
*/
static int set_lba_addressing(ide_drive_t *drive, int arg)
{
+ if (arg < 0 || arg > 2)
+ return -EINVAL;
+
drive->addressing = 0;
if (HWIF(drive)->no_lba48)
@@ -866,23 +875,27 @@ static int set_lba_addressing(ide_drive_t *drive, int arg)
return 0;
}
+#ifdef CONFIG_IDE_PROC_FS
static void idedisk_add_settings(ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
- ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL);
- ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
- ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
- ide_add_setting(drive, "address", SETTING_RW, HDIO_GET_ADDRESS, HDIO_SET_ADDRESS, TYPE_INTA, 0, 2, 1, 1, &drive->addressing, set_lba_addressing);
- ide_add_setting(drive, "bswap", SETTING_READ, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->bswap, NULL);
- ide_add_setting(drive, "multcount", id ? SETTING_RW : SETTING_READ, HDIO_GET_MULTCOUNT, HDIO_SET_MULTCOUNT, TYPE_BYTE, 0, id ? id->max_multsect : 0, 1, 1, &drive->mult_count, set_multcount);
- ide_add_setting(drive, "nowerr", SETTING_RW, HDIO_GET_NOWERR, HDIO_SET_NOWERR, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr);
- ide_add_setting(drive, "lun", SETTING_RW, -1, -1, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL);
- ide_add_setting(drive, "wcache", SETTING_RW, HDIO_GET_WCACHE, HDIO_SET_WCACHE, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache);
- ide_add_setting(drive, "acoustic", SETTING_RW, HDIO_GET_ACOUSTIC, HDIO_SET_ACOUSTIC, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic);
- ide_add_setting(drive, "failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL);
- ide_add_setting(drive, "max_failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL);
+ ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL);
+ ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
+ ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
+ ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1, &drive->addressing, set_lba_addressing);
+ ide_add_setting(drive, "bswap", SETTING_READ, TYPE_BYTE, 0, 1, 1, 1, &drive->bswap, NULL);
+ ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0, id->max_multsect, 1, 1, &drive->mult_count, set_multcount);
+ ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr);
+ ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL);
+ ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache);
+ ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic);
+ ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL);
+ ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL);
}
+#else
+static inline void idedisk_add_settings(ide_drive_t *drive) { ; }
+#endif
static void idedisk_setup (ide_drive_t *drive)
{
@@ -1001,7 +1014,7 @@ static void ide_disk_remove(ide_drive_t *drive)
struct ide_disk_obj *idkp = drive->driver_data;
struct gendisk *g = idkp->disk;
- ide_unregister_subdriver(drive, idkp->driver);
+ ide_proc_unregister_driver(drive, idkp->driver);
del_gendisk(g);
@@ -1066,7 +1079,9 @@ static ide_driver_t idedisk_driver = {
.end_request = ide_end_request,
.error = __ide_error,
.abort = __ide_abort,
+#ifdef CONFIG_IDE_PROC_FS
.proc = idedisk_proc,
+#endif
};
static int idedisk_open(struct inode *inode, struct file *filp)
@@ -1140,9 +1155,49 @@ static int idedisk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int idedisk_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
+ unsigned long flags;
struct block_device *bdev = inode->i_bdev;
struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk);
- return generic_ide_ioctl(idkp->drive, file, bdev, cmd, arg);
+ ide_drive_t *drive = idkp->drive;
+ int err, (*setfunc)(ide_drive_t *, int);
+ u8 *val;
+
+ switch (cmd) {
+ case HDIO_GET_ADDRESS: val = &drive->addressing; goto read_val;
+ case HDIO_GET_MULTCOUNT: val = &drive->mult_count; goto read_val;
+ case HDIO_GET_NOWERR: val = &drive->nowerr; goto read_val;
+ case HDIO_GET_WCACHE: val = &drive->wcache; goto read_val;
+ case HDIO_GET_ACOUSTIC: val = &drive->acoustic; goto read_val;
+ case HDIO_SET_ADDRESS: setfunc = set_lba_addressing; goto set_val;
+ case HDIO_SET_MULTCOUNT: setfunc = set_multcount; goto set_val;
+ case HDIO_SET_NOWERR: setfunc = set_nowerr; goto set_val;
+ case HDIO_SET_WCACHE: setfunc = write_cache; goto set_val;
+ case HDIO_SET_ACOUSTIC: setfunc = set_acoustic; goto set_val;
+ }
+
+ return generic_ide_ioctl(drive, file, bdev, cmd, arg);
+
+read_val:
+ down(&ide_setting_sem);
+ spin_lock_irqsave(&ide_lock, flags);
+ err = *val;
+ spin_unlock_irqrestore(&ide_lock, flags);
+ up(&ide_setting_sem);
+ return err >= 0 ? put_user(err, (long __user *)arg) : err;
+
+set_val:
+ if (bdev != bdev->bd_contains)
+ err = -EINVAL;
+ else {
+ if (!capable(CAP_SYS_ADMIN))
+ err = -EACCES;
+ else {
+ down(&ide_setting_sem);
+ err = setfunc(drive, arg);
+ up(&ide_setting_sem);
+ }
+ }
+ return err;
}
static int idedisk_media_changed(struct gendisk *disk)
@@ -1202,7 +1257,7 @@ static int ide_disk_probe(ide_drive_t *drive)
ide_init_disk(g, drive);
- ide_register_subdriver(drive, &idedisk_driver);
+ ide_proc_register_driver(drive, &idedisk_driver);
kref_init(&idkp->kref);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index fd213088b06..5fe85191d49 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -705,6 +705,100 @@ int ide_use_dma(ide_drive_t *drive)
EXPORT_SYMBOL_GPL(ide_use_dma);
+static const u8 xfer_mode_bases[] = {
+ XFER_UDMA_0,
+ XFER_MW_DMA_0,
+ XFER_SW_DMA_0,
+};
+
+static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base)
+{
+ struct hd_driveid *id = drive->id;
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned int mask = 0;
+
+ switch(base) {
+ case XFER_UDMA_0:
+ if ((id->field_valid & 4) == 0)
+ break;
+
+ mask = id->dma_ultra & hwif->ultra_mask;
+
+ if (hwif->udma_filter)
+ mask &= hwif->udma_filter(drive);
+
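+ /*
+ * Mask bits 3..6 are UDMA3-UDMA6, which require an
+ * 80-wire cable; without one, limit to UDMA0-UDMA2.
+ */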
+ if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
+ mask &= 0x07;
+ break;
+ case XFER_MW_DMA_0:
+ mask = id->dma_mword & hwif->mwdma_mask;
+ break;
+ case XFER_SW_DMA_0:
+ mask = id->dma_1word & hwif->swdma_mask;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return mask;
+}
+
+/**
+ * ide_max_dma_mode - compute DMA speed
+ * @drive: IDE device
+ *
+ * Checks the drive capabilities and returns the speed to use
+ * for the DMA transfer. Returns 0 if the drive is incapable
+ * of DMA transfers.
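+ *
+ * A typical caller (an illustrative sketch, mirroring ide_tune_dma()
+ * below) programs whatever mode is returned:
+ *
+ *     u8 speed = ide_max_dma_mode(drive);
+ *     if (speed)
+ *             drive->hwif->speedproc(drive, speed);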
+ */
+
+u8 ide_max_dma_mode(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned int mask;
+ int x, i;
+ u8 mode = 0;
+
+ if (drive->media != ide_disk && hwif->atapi_dma == 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
+ mask = ide_get_mode_mask(drive, xfer_mode_bases[i]);
+ x = fls(mask) - 1;
+ if (x >= 0) {
+ mode = xfer_mode_bases[i] + x;
+ break;
+ }
+ }
+
+ printk(KERN_DEBUG "%s: selected mode 0x%x\n", drive->name, mode);
+
+ return mode;
+}
+
+EXPORT_SYMBOL_GPL(ide_max_dma_mode);
+
+int ide_tune_dma(ide_drive_t *drive)
+{
+ u8 speed;
+
+ /* TODO: use only ide_max_dma_mode() */
+ if (!ide_use_dma(drive))
+ return 0;
+
+ speed = ide_max_dma_mode(drive);
+
+ if (!speed)
+ return 0;
+
+ drive->hwif->speedproc(drive, speed);
+
+ return ide_dma_enable(drive);
+}
+
+EXPORT_SYMBOL_GPL(ide_tune_dma);
+
void ide_dma_verbose(ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 57cd21c5b2c..f429be88c4f 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -1811,18 +1811,22 @@ static int idefloppy_identify_device (ide_drive_t *drive,struct hd_driveid *id)
return 0;
}
+#ifdef CONFIG_IDE_PROC_FS
static void idefloppy_add_settings(ide_drive_t *drive)
{
idefloppy_floppy_t *floppy = drive->driver_data;
/*
- * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function
+ * drive setting name read/write data type min max mul_factor div_factor data pointer set function
*/
- ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL);
- ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
- ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
- ide_add_setting(drive, "ticks", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &floppy->ticks, NULL);
+ ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL);
+ ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
+ ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
+ ide_add_setting(drive, "ticks", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &floppy->ticks, NULL);
}
+#else
+static inline void idefloppy_add_settings(ide_drive_t *drive) { ; }
+#endif
/*
* Driver initialization.
@@ -1873,7 +1877,7 @@ static void ide_floppy_remove(ide_drive_t *drive)
idefloppy_floppy_t *floppy = drive->driver_data;
struct gendisk *g = floppy->disk;
- ide_unregister_subdriver(drive, floppy->driver);
+ ide_proc_unregister_driver(drive, floppy->driver);
del_gendisk(g);
@@ -1892,8 +1896,7 @@ static void ide_floppy_release(struct kref *kref)
kfree(floppy);
}
-#ifdef CONFIG_PROC_FS
-
+#ifdef CONFIG_IDE_PROC_FS
static int proc_idefloppy_read_capacity
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
@@ -1909,12 +1912,7 @@ static ide_proc_entry_t idefloppy_proc[] = {
{ "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL },
{ NULL, 0, NULL, NULL }
};
-
-#else
-
-#define idefloppy_proc NULL
-
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_IDE_PROC_FS */
static int ide_floppy_probe(ide_drive_t *);
@@ -1933,7 +1931,9 @@ static ide_driver_t idefloppy_driver = {
.end_request = idefloppy_do_end_request,
.error = __ide_error,
.abort = __ide_abort,
+#ifdef CONFIG_IDE_PROC_FS
.proc = idefloppy_proc,
+#endif
};
static int idefloppy_open(struct inode *inode, struct file *filp)
@@ -2159,7 +2159,7 @@ static int ide_floppy_probe(ide_drive_t *drive)
ide_init_disk(g, drive);
- ide_register_subdriver(drive, &idefloppy_driver);
+ ide_proc_register_driver(drive, &idefloppy_driver);
kref_init(&floppy->kref);
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 99fd5615113..0f72b98d727 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -22,8 +22,6 @@ static int __init ide_generic_init(void)
if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
ide_release_lock(); /* for atari only */
- create_proc_ide_interfaces();
-
return 0;
}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8670112f1d3..8e568143d90 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -172,15 +172,6 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
memset(args, 0, sizeof(*args));
- if (drive->media != ide_disk) {
- /*
- * skip idedisk_pm_restore_pio and idedisk_pm_idle for ATAPI
- * devices
- */
- if (pm->pm_step == idedisk_pm_restore_pio)
- pm->pm_step = ide_pm_restore_dma;
- }
-
switch (pm->pm_step) {
case ide_pm_flush_cache: /* Suspend step 1 (flush cache) */
if (drive->media != ide_disk)
@@ -207,7 +198,13 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
case idedisk_pm_restore_pio: /* Resume step 1 (restore PIO) */
if (drive->hwif->tuneproc != NULL)
drive->hwif->tuneproc(drive, 255);
- ide_complete_power_step(drive, rq, 0, 0);
+ /*
+ * skip idedisk_pm_idle for ATAPI devices
+ */
+ if (drive->media != ide_disk)
+ pm->pm_step = ide_pm_restore_dma;
+ else
+ ide_complete_power_step(drive, rq, 0, 0);
return ide_stopped;
case idedisk_pm_idle: /* Resume step 2 (idle) */
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 3caa176b315..f0be5f665a0 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -571,51 +571,54 @@ EXPORT_SYMBOL(ide_wait_stat);
*/
u8 eighty_ninty_three (ide_drive_t *drive)
{
- if(HWIF(drive)->udma_four == 0)
- return 0;
+ ide_hwif_t *hwif = drive->hwif;
+ struct hd_driveid *id = drive->id;
+
+ if (hwif->udma_four == 0)
+ goto no_80w;
/* Check for SATA but only if we are ATA5 or higher */
- if (drive->id->hw_config == 0 && (drive->id->major_rev_num & 0x7FE0))
+ if (id->hw_config == 0 && (id->major_rev_num & 0x7FE0))
return 1;
- if (!(drive->id->hw_config & 0x6000))
- return 0;
-#ifndef CONFIG_IDEDMA_IVB
- if(!(drive->id->hw_config & 0x4000))
- return 0;
-#endif /* CONFIG_IDEDMA_IVB */
+
/*
* FIXME:
* - change master/slave IDENTIFY order
* - force bit13 (80c cable present) check
* (unless the slave device is pre-ATA3)
*/
- return 1;
-}
+#ifndef CONFIG_IDEDMA_IVB
+ if (id->hw_config & 0x4000)
+#else
+ if (id->hw_config & 0x6000)
+#endif
+ return 1;
+
+no_80w:
+ if (drive->udma33_warned == 1)
+ return 0;
-EXPORT_SYMBOL(eighty_ninty_three);
+ printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
+ "limiting max speed to UDMA33\n",
+ drive->name, hwif->udma_four ? "drive" : "host");
+
+ drive->udma33_warned = 1;
+
+ return 0;
+}
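
The masks tested above come from IDENTIFY word 93 (hw_config): 0x2000 is bit 13 and 0x4000 is bit 14; CONFIG_IDEDMA_IVB accepts either bit because some drives report them inconsistently. A condensed sketch of just that test:

static int example_80w_bits_ok(struct hd_driveid *id)
{
#ifndef CONFIG_IDEDMA_IVB
	return (id->hw_config & 0x4000) != 0;	/* bit 14 only */
#else
	return (id->hw_config & 0x6000) != 0;	/* bit 13 or bit 14 */
#endif
}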
int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
{
if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
(args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
(args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
-#ifndef CONFIG_IDEDMA_IVB
- if ((drive->id->hw_config & 0x6000) == 0) {
-#else /* !CONFIG_IDEDMA_IVB */
- if (((drive->id->hw_config & 0x2000) == 0) ||
- ((drive->id->hw_config & 0x4000) == 0)) {
-#endif /* CONFIG_IDEDMA_IVB */
- printk("%s: Speed warnings UDMA 3/4/5 is not "
- "functional.\n", drive->name);
- return 1;
- }
- if (!HWIF(drive)->udma_four) {
- printk("%s: Speed warnings UDMA 3/4/5 is not "
- "functional.\n",
- HWIF(drive)->name);
+ if (eighty_ninty_three(drive) == 0) {
+ printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
+ "be set\n", drive->name);
return 1;
}
}
+
return 0;
}
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 68719314df3..3be3c69383f 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -69,123 +69,41 @@ char *ide_xfer_verbose (u8 xfer_rate)
EXPORT_SYMBOL(ide_xfer_verbose);
/**
- * ide_dma_speed - compute DMA speed
- * @drive: drive
- * @mode: modes available
- *
- * Checks the drive capabilities and returns the speed to use
- * for the DMA transfer. Returns 0 if the drive is incapable
- * of DMA transfers.
- */
-
-u8 ide_dma_speed(ide_drive_t *drive, u8 mode)
-{
- struct hd_driveid *id = drive->id;
- ide_hwif_t *hwif = HWIF(drive);
- u8 ultra_mask, mwdma_mask, swdma_mask;
- u8 speed = 0;
-
- if (drive->media != ide_disk && hwif->atapi_dma == 0)
- return 0;
-
- /* Capable of UltraDMA modes? */
- ultra_mask = id->dma_ultra & hwif->ultra_mask;
-
- if (!(id->field_valid & 4))
- mode = 0; /* fallback to MW/SW DMA if no UltraDMA */
-
- switch (mode) {
- case 4:
- if (ultra_mask & 0x40) {
- speed = XFER_UDMA_6;
- break;
- }
- case 3:
- if (ultra_mask & 0x20) {
- speed = XFER_UDMA_5;
- break;
- }
- case 2:
- if (ultra_mask & 0x10) {
- speed = XFER_UDMA_4;
- break;
- }
- if (ultra_mask & 0x08) {
- speed = XFER_UDMA_3;
- break;
- }
- case 1:
- if (ultra_mask & 0x04) {
- speed = XFER_UDMA_2;
- break;
- }
- if (ultra_mask & 0x02) {
- speed = XFER_UDMA_1;
- break;
- }
- if (ultra_mask & 0x01) {
- speed = XFER_UDMA_0;
- break;
- }
- case 0:
- mwdma_mask = id->dma_mword & hwif->mwdma_mask;
-
- if (mwdma_mask & 0x04) {
- speed = XFER_MW_DMA_2;
- break;
- }
- if (mwdma_mask & 0x02) {
- speed = XFER_MW_DMA_1;
- break;
- }
- if (mwdma_mask & 0x01) {
- speed = XFER_MW_DMA_0;
- break;
- }
-
- swdma_mask = id->dma_1word & hwif->swdma_mask;
-
- if (swdma_mask & 0x04) {
- speed = XFER_SW_DMA_2;
- break;
- }
- if (swdma_mask & 0x02) {
- speed = XFER_SW_DMA_1;
- break;
- }
- if (swdma_mask & 0x01) {
- speed = XFER_SW_DMA_0;
- break;
- }
- }
-
- return speed;
-}
-EXPORT_SYMBOL(ide_dma_speed);
-
-
-/**
- * ide_rate_filter - return best speed for mode
- * @mode: modes available
+ * ide_rate_filter - filter transfer mode
+ * @drive: IDE device
* @speed: desired speed
*
- * Given the available DMA/UDMA mode this function returns
+ * Given the available transfer modes this function returns
* the best available speed at or below the speed requested.
+ *
+ * FIXME: filter also PIO/SWDMA/MWDMA modes
*/
-u8 ide_rate_filter (u8 mode, u8 speed)
+u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
{
#ifdef CONFIG_BLK_DEV_IDEDMA
- static u8 speed_max[] = {
- XFER_MW_DMA_2, XFER_UDMA_2, XFER_UDMA_4,
- XFER_UDMA_5, XFER_UDMA_6
- };
+ ide_hwif_t *hwif = drive->hwif;
+ u8 mask = hwif->ultra_mask, mode = XFER_MW_DMA_2;
+
+ if (hwif->udma_filter)
+ mask = hwif->udma_filter(drive);
+
+ /*
+ * TODO: the speed > XFER_UDMA_2 extra check is needed to avoid a
+ * false cable warning from eighty_ninty_three(); moving the
+ * ide_rate_filter() calls from ->speedproc to the core code will
+ * make this hack go away
+ */
+ if (speed > XFER_UDMA_2) {
+ if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
+ mask &= 0x07;
+ }
+
+ if (mask)
+ mode = fls(mask) - 1 + XFER_UDMA_0;
// printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed);
- /* So that we remember to update this if new modes appear */
- BUG_ON(mode > 4);
- return min(speed, speed_max[mode]);
+ return min(speed, mode);
#else /* !CONFIG_BLK_DEV_IDEDMA */
return min(speed, (u8)XFER_PIO_4);
#endif /* CONFIG_BLK_DEV_IDEDMA */
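
A worked example (a sketch) of the fls()-based mapping above: for a filtered mask of 0x1f, fls(0x1f) == 5, so the result is 5 - 1 + XFER_UDMA_0 == XFER_UDMA_4; a mask clipped to 0x07 by the cable check caps the result at XFER_UDMA_2.

static u8 example_top_udma_mode(u8 mask)
{
	if (mask == 0)
		return XFER_MW_DMA_2;	/* same fallback as above */

	return fls(mask) - 1 + XFER_UDMA_0;
}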
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 98410ca044c..2b8009c50e9 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -42,7 +42,7 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
hw.irq = pnp_irq(dev, 0);
hw.dma = NO_DMA;
- index = ide_register_hw(&hw, &hwif);
+ index = ide_register_hw(&hw, 1, &hwif);
if (index != -1) {
printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 8f15c23aa70..3cebed77f55 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1427,6 +1427,9 @@ int ideprobe_init (void)
}
}
}
+ for (index = 0; index < MAX_HWIFS; ++index)
+ if (probe[index])
+ ide_proc_register_port(&ide_hwifs[index]);
return 0;
}
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index a9e0b30fb1f..d50bd996ff2 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -3,6 +3,8 @@
*
* Copyright (C) 1997-1998 Mark Lord
* Copyright (C) 2003 Red Hat <alan@redhat.com>
+ *
+ * Some code was moved here from ide.c; see that file for the original copyrights.
*/
/*
@@ -37,6 +39,8 @@
#include <asm/io.h>
+static struct proc_dir_entry *proc_ide_root;
+
static int proc_ide_read_imodel
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
@@ -121,6 +125,265 @@ static int proc_ide_read_identify
PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}
+/**
+ * __ide_add_setting - add an ide setting option
+ * @drive: drive to use
+ * @name: setting name
+ * @rw: true if the function is read write
+ * @data_type: type of data
+ * @min: range minimum
+ * @max: range maximum
+ * @mul_factor: multiplication scale
+ * @div_factor: division scale
+ * @data: private data field
+ * @set: setting update function (or NULL)
+ * @auto_remove: setting auto removal flag
+ *
+ * Adds the named setting to the device's setting list, keeping the
+ * list sorted by name. The function takes ide_setting_sem to protect
+ * against parallel changes. This function must not be called from IRQ
+ * context. Returns 0 on success or -1 on failure.
+ *
+ * BUGS: This code is seriously over-engineered. There is also
+ * magic about how the driver-specific features are set up. If
+ * a driver is attached we assume the driver settings are
+ * auto-removed.
+ */
+
+static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set, int auto_remove)
+{
+ ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL;
+
+ down(&ide_setting_sem);
+ while ((*p) && strcmp((*p)->name, name) < 0)
+ p = &((*p)->next);
+ if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL)
+ goto abort;
+ if ((setting->name = kmalloc(strlen(name) + 1, GFP_KERNEL)) == NULL)
+ goto abort;
+ strcpy(setting->name, name);
+ setting->rw = rw;
+ setting->data_type = data_type;
+ setting->min = min;
+ setting->max = max;
+ setting->mul_factor = mul_factor;
+ setting->div_factor = div_factor;
+ setting->data = data;
+ setting->set = set;
+
+ setting->next = *p;
+ if (auto_remove)
+ setting->auto_remove = 1;
+ *p = setting;
+ up(&ide_setting_sem);
+ return 0;
+abort:
+ up(&ide_setting_sem);
+ kfree(setting);
+ return -1;
+}
+
+int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set)
+{
+ return __ide_add_setting(drive, name, rw, data_type, min, max, mul_factor, div_factor, data, set, 1);
+}
+
+EXPORT_SYMBOL(ide_add_setting);
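
Usage sketch for the trimmed ide_add_setting() signature (the two ioctl arguments are gone, cf. the ide-floppy conversion above); the setting name and backing structure here are hypothetical.

struct example_state {
	u8 level;
};

static void example_add_settings(ide_drive_t *drive)
{
	struct example_state *st = drive->driver_data;

	/* name, rw, type, min, max, mul, div, data, set function */
	ide_add_setting(drive, "example_level", SETTING_RW, TYPE_BYTE,
			0, 255, 1, 1, &st->level, NULL);
}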
+
+/**
+ * __ide_remove_setting - remove an ide setting option
+ * @drive: drive to use
+ * @name: setting name
+ *
+ * Removes the setting named from the device if it is present.
+ * The caller must hold the setting semaphore.
+ */
+
+static void __ide_remove_setting (ide_drive_t *drive, char *name)
+{
+ ide_settings_t **p, *setting;
+
+ p = (ide_settings_t **) &drive->settings;
+
+ while ((*p) && strcmp((*p)->name, name))
+ p = &((*p)->next);
+ if ((setting = (*p)) == NULL)
+ return;
+
+ (*p) = setting->next;
+
+ kfree(setting->name);
+ kfree(setting);
+}
+
+/**
+ * auto_remove_settings - remove driver specific settings
+ * @drive: drive
+ *
+ * Automatically remove all the driver specific settings for this
+ * drive. This function may not be called from IRQ context. The
+ * caller must hold ide_setting_sem.
+ */
+
+static void auto_remove_settings (ide_drive_t *drive)
+{
+ ide_settings_t *setting;
+repeat:
+ setting = drive->settings;
+ while (setting) {
+ if (setting->auto_remove) {
+ __ide_remove_setting(drive, setting->name);
+ goto repeat;
+ }
+ setting = setting->next;
+ }
+}
+
+/**
+ * ide_find_setting_by_name - find a drive specific setting
+ * @drive: drive to scan
+ * @name: setting name
+ *
+ * Scans the device setting table for a matching entry and returns
+ * it, or NULL if no entry is found. The caller must hold the
+ * setting semaphore.
+ */
+
+static ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name)
+{
+ ide_settings_t *setting = drive->settings;
+
+ while (setting) {
+ if (strcmp(setting->name, name) == 0)
+ break;
+ setting = setting->next;
+ }
+ return setting;
+}
+
+/**
+ * ide_read_setting - read an IDE setting
+ * @drive: drive to read from
+ * @setting: drive setting
+ *
+ * Read a drive setting and return the value. The caller
+ * must hold the ide_setting_sem when making this call.
+ *
+ * BUGS: the data return and the error share the same return value,
+ * so an error of -EINVAL and a setting legitimately valued at -EINVAL
+ * cannot be told apart.
+ */
+
+static int ide_read_setting(ide_drive_t *drive, ide_settings_t *setting)
+{
+ int val = -EINVAL;
+ unsigned long flags;
+
+ if ((setting->rw & SETTING_READ)) {
+ spin_lock_irqsave(&ide_lock, flags);
+ switch(setting->data_type) {
+ case TYPE_BYTE:
+ val = *((u8 *) setting->data);
+ break;
+ case TYPE_SHORT:
+ val = *((u16 *) setting->data);
+ break;
+ case TYPE_INT:
+ val = *((u32 *) setting->data);
+ break;
+ }
+ spin_unlock_irqrestore(&ide_lock, flags);
+ }
+ return val;
+}
+
+/**
+ * ide_write_setting - write an IDE setting
+ * @drive: drive to write to
+ * @setting: drive setting
+ * @val: value
+ *
+ * Write a drive setting if it is possible. The caller
+ * must hold the ide_setting_sem when making this call.
+ * Returns 0 on success or a negative error code on failure.
+ *
+ * FIXME: This should be changed to enqueue a special request
+ * to the driver to change settings, and then wait on a sema for completion.
+ * The current scheme of polling is kludgy, though safe enough.
+ */
+
+static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int val)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (setting->set)
+ return setting->set(drive, val);
+ if (!(setting->rw & SETTING_WRITE))
+ return -EPERM;
+ if (val < setting->min || val > setting->max)
+ return -EINVAL;
+ if (ide_spin_wait_hwgroup(drive))
+ return -EBUSY;
+ switch (setting->data_type) {
+ case TYPE_BYTE:
+ *((u8 *) setting->data) = val;
+ break;
+ case TYPE_SHORT:
+ *((u16 *) setting->data) = val;
+ break;
+ case TYPE_INT:
+ *((u32 *) setting->data) = val;
+ break;
+ }
+ spin_unlock_irq(&ide_lock);
+ return 0;
+}
+
+static int set_xfer_rate (ide_drive_t *drive, int arg)
+{
+ int err;
+
+ if (arg < 0 || arg > 70)
+ return -EINVAL;
+
+ err = ide_wait_cmd(drive,
+ WIN_SETFEATURES, (u8) arg,
+ SETFEATURES_XFER, 0, NULL);
+
+ if (!err && arg) {
+ ide_set_xfer_rate(drive, (u8) arg);
+ ide_driveid_update(drive);
+ }
+ return err;
+}
+
+/**
+ * ide_add_generic_settings - generic ide settings
+ * @drive: drive being configured
+ *
+ * Add the generic parts of the system settings to the /proc files.
+ * The caller must not be holding the ide_setting_sem.
+ */
+
+void ide_add_generic_settings (ide_drive_t *drive)
+{
+/*
+ * drive setting name read/write access data type min max mul_factor div_factor data pointer set function
+ */
+ __ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit, 0);
+ __ide_add_setting(drive, "keepsettings", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL, 0);
+ __ide_add_setting(drive, "nice1", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nice1, NULL, 0);
+ __ide_add_setting(drive, "pio_mode", SETTING_WRITE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode, 0);
+ __ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL, 0);
+ __ide_add_setting(drive, "using_dma", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma, 0);
+ __ide_add_setting(drive, "init_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->init_speed, NULL, 0);
+ __ide_add_setting(drive, "current_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->current_speed, set_xfer_rate, 0);
+ __ide_add_setting(drive, "number", SETTING_RW, TYPE_BYTE, 0, 3, 1, 1, &drive->dn, NULL, 0);
+}
+
static void proc_ide_settings_warn(void)
{
static int warned = 0;
@@ -399,7 +662,7 @@ static ide_proc_entry_t generic_drive_entries[] = {
{ NULL, 0, NULL, NULL }
};
-void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data)
+static void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data)
{
struct proc_dir_entry *ent;
@@ -415,7 +678,7 @@ void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void
}
}
-void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
+static void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
{
if (!dir || !p)
return;
@@ -425,6 +688,51 @@ void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
}
}
+void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver)
+{
+ ide_add_proc_entries(drive->proc, driver->proc, drive);
+}
+
+EXPORT_SYMBOL(ide_proc_register_driver);
+
+/**
+ * ide_proc_unregister_driver - remove driver specific data
+ * @drive: drive
+ * @driver: driver
+ *
+ * Clean up the driver specific /proc files and IDE settings
+ * for a given drive.
+ *
+ * Takes ide_setting_sem and ide_lock.
+ * Caller must hold none of the locks.
+ */
+
+void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
+{
+ unsigned long flags;
+
+ ide_remove_proc_entries(drive->proc, driver->proc);
+
+ down(&ide_setting_sem);
+ spin_lock_irqsave(&ide_lock, flags);
+ /*
+ * ide_setting_sem protects the settings list
+ * ide_lock protects the use of settings
+ *
+ * so we need to hold both: ide_setting_sem because we want to
+ * modify the settings list, and ide_lock because we cannot take
+ * a setting out that is being used.
+ *
+ * OTOH both ide_{read,write}_setting are only ever used under
+ * ide_setting_sem.
+ */
+ auto_remove_settings(drive);
+ spin_unlock_irqrestore(&ide_lock, flags);
+ up(&ide_setting_sem);
+}
+
+EXPORT_SYMBOL(ide_proc_unregister_driver);
+
static void create_proc_ide_drives(ide_hwif_t *hwif)
{
int d;
@@ -477,26 +785,24 @@ static ide_proc_entry_t hwif_entries[] = {
{ NULL, 0, NULL, NULL }
};
-void create_proc_ide_interfaces(void)
+void ide_proc_register_port(ide_hwif_t *hwif)
{
- int h;
+ if (!hwif->present)
+ return;
- for (h = 0; h < MAX_HWIFS; h++) {
- ide_hwif_t *hwif = &ide_hwifs[h];
+ if (!hwif->proc) {
+ hwif->proc = proc_mkdir(hwif->name, proc_ide_root);
- if (!hwif->present)
- continue;
- if (!hwif->proc) {
- hwif->proc = proc_mkdir(hwif->name, proc_ide_root);
- if (!hwif->proc)
- return;
- ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
- }
- create_proc_ide_drives(hwif);
+ if (!hwif->proc)
+ return;
+
+ ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
}
+
+ create_proc_ide_drives(hwif);
}
-EXPORT_SYMBOL(create_proc_ide_interfaces);
+EXPORT_SYMBOL_GPL(ide_proc_register_port);
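
A sketch of the host-driver pattern this export enables (it mirrors the legacy-driver conversions later in this patch): register the single probed port instead of rescanning every interface.

static int __init example_port_probe(ide_hwif_t *hwif)
{
	probe_hwif_init(hwif);
	ide_proc_register_port(hwif);	/* no-op unless hwif->present */

	return 0;
}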
#ifdef CONFIG_BLK_DEV_IDEPCI
void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
@@ -507,7 +813,7 @@ void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
EXPORT_SYMBOL_GPL(ide_pci_create_host_proc);
#endif
-void destroy_proc_ide_interface(ide_hwif_t *hwif)
+void ide_proc_unregister_port(ide_hwif_t *hwif)
{
if (hwif->proc) {
destroy_proc_ide_drives(hwif);
@@ -554,11 +860,11 @@ void proc_ide_create(void)
{
struct proc_dir_entry *entry;
+ proc_ide_root = proc_mkdir("ide", NULL);
+
if (!proc_ide_root)
return;
- create_proc_ide_interfaces();
-
entry = create_proc_entry("drivers", 0, proc_ide_root);
if (entry)
entry->proc_fops = &ide_drivers_operations;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 4e59239fef7..e82bfa5e0ab 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -4561,28 +4561,33 @@ static void idetape_get_blocksize_from_block_descriptor(ide_drive_t *drive)
printk(KERN_INFO "ide-tape: Adjusted block size - %d\n", tape->tape_block_size);
#endif /* IDETAPE_DEBUG_INFO */
}
+
+#ifdef CONFIG_IDE_PROC_FS
static void idetape_add_settings (ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
/*
- * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function
+ * drive setting name read/write data type min max mul_factor div_factor data pointer set function
*/
- ide_add_setting(drive, "buffer", SETTING_READ, -1, -1, TYPE_SHORT, 0, 0xffff, 1, 2, &tape->capabilities.buffer_size, NULL);
- ide_add_setting(drive, "pipeline_min", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
- ide_add_setting(drive, "pipeline", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL);
- ide_add_setting(drive, "pipeline_max", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
- ide_add_setting(drive, "pipeline_used",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL);
- ide_add_setting(drive, "pipeline_pending",SETTING_READ,-1, -1, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL);
- ide_add_setting(drive, "speed", SETTING_READ, -1, -1, TYPE_SHORT, 0, 0xffff, 1, 1, &tape->capabilities.speed, NULL);
- ide_add_setting(drive, "stage", SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL);
- ide_add_setting(drive, "tdsc", SETTING_RW, -1, -1, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL);
- ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
- ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL);
- ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed, NULL);
- ide_add_setting(drive, "avg_speed", SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL);
- ide_add_setting(drive, "debug_level",SETTING_RW, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL);
+ ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff, 1, 2, &tape->capabilities.buffer_size, NULL);
+ ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
+ ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL);
+ ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
+ ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL);
+ ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL);
+ ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff, 1, 1, &tape->capabilities.speed, NULL);
+ ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL);
+ ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL);
+ ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
+ ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL);
+ ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed,NULL);
+ ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL);
+ ide_add_setting(drive, "debug_level", SETTING_RW, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL);
}
+#else
+static inline void idetape_add_settings(ide_drive_t *drive) { ; }
+#endif
/*
* ide_setup is called to:
@@ -4703,7 +4708,7 @@ static void ide_tape_remove(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- ide_unregister_subdriver(drive, tape->driver);
+ ide_proc_unregister_driver(drive, tape->driver);
ide_unregister_region(tape->disk);
@@ -4730,8 +4735,7 @@ static void ide_tape_release(struct kref *kref)
kfree(tape);
}
-#ifdef CONFIG_PROC_FS
-
+#ifdef CONFIG_IDE_PROC_FS
static int proc_idetape_read_name
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
@@ -4749,11 +4753,6 @@ static ide_proc_entry_t idetape_proc[] = {
{ "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
{ NULL, 0, NULL, NULL }
};
-
-#else
-
-#define idetape_proc NULL
-
#endif
static int ide_tape_probe(ide_drive_t *);
@@ -4773,7 +4772,9 @@ static ide_driver_t idetape_driver = {
.end_request = idetape_end_request,
.error = __ide_error,
.abort = __ide_abort,
+#ifdef CONFIG_IDE_PROC_FS
.proc = idetape_proc,
+#endif
};
/*
@@ -4864,7 +4865,7 @@ static int ide_tape_probe(ide_drive_t *drive)
ide_init_disk(g, drive);
- ide_register_subdriver(drive, &idetape_driver);
+ ide_proc_register_driver(drive, &idetape_driver);
kref_init(&tape->kref);
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index ae5bf2be6f5..f2b547ff772 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -168,12 +168,11 @@ static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR,
static int idebus_parameter; /* holds the "idebus=" parameter */
static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */
-static int initializing; /* set while initializing built-in drivers */
DECLARE_MUTEX(ide_cfg_sem);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
-#ifdef CONFIG_BLK_DEV_IDEPCI
+#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
#endif
@@ -216,9 +215,6 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
hwif->bus_state = BUSSTATE_ON;
hwif->atapi_dma = 0; /* disable all atapi dma */
- hwif->ultra_mask = 0x80; /* disable all ultra */
- hwif->mwdma_mask = 0x80; /* disable all mwdma */
- hwif->swdma_mask = 0x80; /* disable all swdma */
init_completion(&hwif->gendev_rel_comp);
@@ -305,9 +301,7 @@ static void __init init_ide_data (void)
#endif
}
#ifdef CONFIG_IDE_ARM
- initializing = 1;
ide_arm_init();
- initializing = 0;
#endif
}
@@ -353,10 +347,6 @@ static int ide_system_bus_speed(void)
return system_bus_speed;
}
-#ifdef CONFIG_PROC_FS
-struct proc_dir_entry *proc_ide_root;
-#endif
-
static struct resource* hwif_request_region(ide_hwif_t *hwif,
unsigned long addr, int num)
{
@@ -480,6 +470,7 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
hwif->tuneproc = tmp_hwif->tuneproc;
hwif->speedproc = tmp_hwif->speedproc;
+ hwif->udma_filter = tmp_hwif->udma_filter;
hwif->selectproc = tmp_hwif->selectproc;
hwif->reset_poll = tmp_hwif->reset_poll;
hwif->pre_reset = tmp_hwif->pre_reset;
@@ -599,7 +590,7 @@ void ide_unregister(unsigned int index)
spin_unlock_irq(&ide_lock);
- destroy_proc_ide_interface(hwif);
+ ide_proc_unregister_port(hwif);
hwgroup = hwif->hwgroup;
/*
@@ -751,6 +742,7 @@ void ide_setup_ports ( hw_regs_t *hw,
/**
* ide_register_hw_with_fixup - register IDE interface
* @hw: hardware registers
+ * @initializing: set while initializing built-in drivers
* @hwifp: pointer to returned hwif
* @fixup: fixup function
*
@@ -760,7 +752,9 @@ void ide_setup_ports ( hw_regs_t *hw,
* Returns -1 on error.
*/
-int ide_register_hw_with_fixup(hw_regs_t *hw, ide_hwif_t **hwifp, void(*fixup)(ide_hwif_t *hwif))
+int ide_register_hw_with_fixup(hw_regs_t *hw, int initializing,
+ ide_hwif_t **hwifp,
+ void(*fixup)(ide_hwif_t *hwif))
{
int index, retry = 1;
ide_hwif_t *hwif;
@@ -801,7 +795,7 @@ found:
if (!initializing) {
probe_hwif_init_with_fixup(hwif, fixup);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
}
if (hwifp)
@@ -812,9 +806,9 @@ found:
EXPORT_SYMBOL(ide_register_hw_with_fixup);
-int ide_register_hw(hw_regs_t *hw, ide_hwif_t **hwifp)
+int ide_register_hw(hw_regs_t *hw, int initializing, ide_hwif_t **hwifp)
{
- return ide_register_hw_with_fixup(hw, hwifp, NULL);
+ return ide_register_hw_with_fixup(hw, initializing, hwifp, NULL);
}
EXPORT_SYMBOL(ide_register_hw);
@@ -825,205 +819,7 @@ EXPORT_SYMBOL(ide_register_hw);
DECLARE_MUTEX(ide_setting_sem);
-/**
- * __ide_add_setting - add an ide setting option
- * @drive: drive to use
- * @name: setting name
- * @rw: true if the function is read write
- * @read_ioctl: function to call on read
- * @write_ioctl: function to call on write
- * @data_type: type of data
- * @min: range minimum
- * @max: range maximum
- * @mul_factor: multiplication scale
- * @div_factor: divison scale
- * @data: private data field
- * @set: setting
- * @auto_remove: setting auto removal flag
- *
- * Removes the setting named from the device if it is present.
- * The function takes the settings_lock to protect against
- * parallel changes. This function must not be called from IRQ
- * context. Returns 0 on success or -1 on failure.
- *
- * BUGS: This code is seriously over-engineered. There is also
- * magic about how the driver specific features are setup. If
- * a driver is attached we assume the driver settings are auto
- * remove.
- */
-
-static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set, int auto_remove)
-{
- ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL;
-
- down(&ide_setting_sem);
- while ((*p) && strcmp((*p)->name, name) < 0)
- p = &((*p)->next);
- if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL)
- goto abort;
- if ((setting->name = kmalloc(strlen(name) + 1, GFP_KERNEL)) == NULL)
- goto abort;
- strcpy(setting->name, name);
- setting->rw = rw;
- setting->read_ioctl = read_ioctl;
- setting->write_ioctl = write_ioctl;
- setting->data_type = data_type;
- setting->min = min;
- setting->max = max;
- setting->mul_factor = mul_factor;
- setting->div_factor = div_factor;
- setting->data = data;
- setting->set = set;
-
- setting->next = *p;
- if (auto_remove)
- setting->auto_remove = 1;
- *p = setting;
- up(&ide_setting_sem);
- return 0;
-abort:
- up(&ide_setting_sem);
- kfree(setting);
- return -1;
-}
-
-int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set)
-{
- return __ide_add_setting(drive, name, rw, read_ioctl, write_ioctl, data_type, min, max, mul_factor, div_factor, data, set, 1);
-}
-
-EXPORT_SYMBOL(ide_add_setting);
-
-/**
- * __ide_remove_setting - remove an ide setting option
- * @drive: drive to use
- * @name: setting name
- *
- * Removes the setting named from the device if it is present.
- * The caller must hold the setting semaphore.
- */
-
-static void __ide_remove_setting (ide_drive_t *drive, char *name)
-{
- ide_settings_t **p, *setting;
-
- p = (ide_settings_t **) &drive->settings;
-
- while ((*p) && strcmp((*p)->name, name))
- p = &((*p)->next);
- if ((setting = (*p)) == NULL)
- return;
-
- (*p) = setting->next;
-
- kfree(setting->name);
- kfree(setting);
-}
-
-/**
- * ide_find_setting_by_ioctl - find a drive specific ioctl
- * @drive: drive to scan
- * @cmd: ioctl command to handle
- *
- * Scan's the device setting table for a matching entry and returns
- * this or NULL if no entry is found. The caller must hold the
- * setting semaphore
- */
-
-static ide_settings_t *ide_find_setting_by_ioctl (ide_drive_t *drive, int cmd)
-{
- ide_settings_t *setting = drive->settings;
-
- while (setting) {
- if (setting->read_ioctl == cmd || setting->write_ioctl == cmd)
- break;
- setting = setting->next;
- }
-
- return setting;
-}
-
-/**
- * ide_find_setting_by_name - find a drive specific setting
- * @drive: drive to scan
- * @name: setting name
- *
- * Scan's the device setting table for a matching entry and returns
- * this or NULL if no entry is found. The caller must hold the
- * setting semaphore
- */
-
-ide_settings_t *ide_find_setting_by_name (ide_drive_t *drive, char *name)
-{
- ide_settings_t *setting = drive->settings;
-
- while (setting) {
- if (strcmp(setting->name, name) == 0)
- break;
- setting = setting->next;
- }
- return setting;
-}
-
-/**
- * auto_remove_settings - remove driver specific settings
- * @drive: drive
- *
- * Automatically remove all the driver specific settings for this
- * drive. This function may not be called from IRQ context. The
- * caller must hold ide_setting_sem.
- */
-
-static void auto_remove_settings (ide_drive_t *drive)
-{
- ide_settings_t *setting;
-repeat:
- setting = drive->settings;
- while (setting) {
- if (setting->auto_remove) {
- __ide_remove_setting(drive, setting->name);
- goto repeat;
- }
- setting = setting->next;
- }
-}
-
-/**
- * ide_read_setting - read an IDE setting
- * @drive: drive to read from
- * @setting: drive setting
- *
- * Read a drive setting and return the value. The caller
- * must hold the ide_setting_sem when making this call.
- *
- * BUGS: the data return and error are the same return value
- * so an error -EINVAL and true return of the same value cannot
- * be told apart
- */
-
-int ide_read_setting (ide_drive_t *drive, ide_settings_t *setting)
-{
- int val = -EINVAL;
- unsigned long flags;
-
- if ((setting->rw & SETTING_READ)) {
- spin_lock_irqsave(&ide_lock, flags);
- switch(setting->data_type) {
- case TYPE_BYTE:
- val = *((u8 *) setting->data);
- break;
- case TYPE_SHORT:
- val = *((u16 *) setting->data);
- break;
- case TYPE_INT:
- case TYPE_INTA:
- val = *((u32 *) setting->data);
- break;
- }
- spin_unlock_irqrestore(&ide_lock, flags);
- }
- return val;
-}
+EXPORT_SYMBOL_GPL(ide_setting_sem);
/**
* ide_spin_wait_hwgroup - wait for group
@@ -1058,61 +854,14 @@ int ide_spin_wait_hwgroup (ide_drive_t *drive)
EXPORT_SYMBOL(ide_spin_wait_hwgroup);
-/**
- * ide_write_setting - read an IDE setting
- * @drive: drive to read from
- * @setting: drive setting
- * @val: value
- *
- * Write a drive setting if it is possible. The caller
- * must hold the ide_setting_sem when making this call.
- *
- * BUGS: the data return and error are the same return value
- * so an error -EINVAL and true return of the same value cannot
- * be told apart
- *
- * FIXME: This should be changed to enqueue a special request
- * to the driver to change settings, and then wait on a sema for completion.
- * The current scheme of polling is kludgy, though safe enough.
- */
-
-int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val)
+int set_io_32bit(ide_drive_t *drive, int arg)
{
- int i;
- u32 *p;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (!(setting->rw & SETTING_WRITE))
+ if (drive->no_io_32bit)
return -EPERM;
- if (val < setting->min || val > setting->max)
+
+ if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
return -EINVAL;
- if (setting->set)
- return setting->set(drive, val);
- if (ide_spin_wait_hwgroup(drive))
- return -EBUSY;
- switch (setting->data_type) {
- case TYPE_BYTE:
- *((u8 *) setting->data) = val;
- break;
- case TYPE_SHORT:
- *((u16 *) setting->data) = val;
- break;
- case TYPE_INT:
- *((u32 *) setting->data) = val;
- break;
- case TYPE_INTA:
- p = (u32 *) setting->data;
- for (i = 0; i < 1 << PARTN_BITS; i++, p++)
- *p = val;
- break;
- }
- spin_unlock_irq(&ide_lock);
- return 0;
-}
-static int set_io_32bit(ide_drive_t *drive, int arg)
-{
drive->io_32bit = arg;
#ifdef CONFIG_BLK_DEV_DTC2278
if (HWIF(drive)->chipset == ide_dtc2278)
@@ -1121,12 +870,28 @@ static int set_io_32bit(ide_drive_t *drive, int arg)
return 0;
}
-static int set_using_dma (ide_drive_t *drive, int arg)
+static int set_ksettings(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (ide_spin_wait_hwgroup(drive))
+ return -EBUSY;
+ drive->keep_settings = arg;
+ spin_unlock_irq(&ide_lock);
+
+ return 0;
+}
+
+int set_using_dma(ide_drive_t *drive, int arg)
{
#ifdef CONFIG_BLK_DEV_IDEDMA
ide_hwif_t *hwif = drive->hwif;
int err = -EPERM;
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
if (!drive->id || !(drive->id->capability & 1))
goto out;
@@ -1159,14 +924,20 @@ static int set_using_dma (ide_drive_t *drive, int arg)
out:
return err;
#else
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
return -EPERM;
#endif
}
-static int set_pio_mode (ide_drive_t *drive, int arg)
+int set_pio_mode(ide_drive_t *drive, int arg)
{
struct request rq;
+ if (arg < 0 || arg > 255)
+ return -EINVAL;
+
if (!HWIF(drive)->tuneproc)
return -ENOSYS;
if (drive->special.b.set_tune)
@@ -1178,42 +949,20 @@ static int set_pio_mode (ide_drive_t *drive, int arg)
return 0;
}
-static int set_xfer_rate (ide_drive_t *drive, int arg)
+static int set_unmaskirq(ide_drive_t *drive, int arg)
{
- int err = ide_wait_cmd(drive,
- WIN_SETFEATURES, (u8) arg,
- SETFEATURES_XFER, 0, NULL);
+ if (drive->no_unmask)
+ return -EPERM;
- if (!err && arg) {
- ide_set_xfer_rate(drive, (u8) arg);
- ide_driveid_update(drive);
- }
- return err;
-}
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
-/**
- * ide_add_generic_settings - generic ide settings
- * @drive: drive being configured
- *
- * Add the generic parts of the system settings to the /proc files and
- * ioctls for this IDE device. The caller must not be holding the
- * ide_setting_sem.
- */
+ if (ide_spin_wait_hwgroup(drive))
+ return -EBUSY;
+ drive->unmask = arg;
+ spin_unlock_irq(&ide_lock);
-void ide_add_generic_settings (ide_drive_t *drive)
-{
-/*
- * drive setting name read/write access read ioctl write ioctl data type min max mul_factor div_factor data pointer set function
- */
- __ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? SETTING_READ : SETTING_RW, HDIO_GET_32BIT, HDIO_SET_32BIT, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit, 0);
- __ide_add_setting(drive, "keepsettings", SETTING_RW, HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL, 0);
- __ide_add_setting(drive, "nice1", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->nice1, NULL, 0);
- __ide_add_setting(drive, "pio_mode", SETTING_WRITE, -1, HDIO_SET_PIO_MODE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode, 0);
- __ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, HDIO_GET_UNMASKINTR, HDIO_SET_UNMASKINTR, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL, 0);
- __ide_add_setting(drive, "using_dma", SETTING_RW, HDIO_GET_DMA, HDIO_SET_DMA, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma, 0);
- __ide_add_setting(drive, "init_speed", SETTING_RW, -1, -1, TYPE_BYTE, 0, 70, 1, 1, &drive->init_speed, NULL, 0);
- __ide_add_setting(drive, "current_speed", SETTING_RW, -1, -1, TYPE_BYTE, 0, 70, 1, 1, &drive->current_speed, set_xfer_rate, 0);
- __ide_add_setting(drive, "number", SETTING_RW, -1, -1, TYPE_BYTE, 0, 3, 1, 1, &drive->dn, NULL, 0);
+ return 0;
}
/**
@@ -1285,27 +1034,23 @@ static int generic_ide_resume(struct device *dev)
int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev,
unsigned int cmd, unsigned long arg)
{
- ide_settings_t *setting;
+ unsigned long flags;
ide_driver_t *drv;
- int err = 0;
void __user *p = (void __user *)arg;
+ int err = 0, (*setfunc)(ide_drive_t *, int);
+ u8 *val;
- down(&ide_setting_sem);
- if ((setting = ide_find_setting_by_ioctl(drive, cmd)) != NULL) {
- if (cmd == setting->read_ioctl) {
- err = ide_read_setting(drive, setting);
- up(&ide_setting_sem);
- return err >= 0 ? put_user(err, (long __user *)arg) : err;
- } else {
- if (bdev != bdev->bd_contains)
- err = -EINVAL;
- else
- err = ide_write_setting(drive, setting, arg);
- up(&ide_setting_sem);
- return err;
- }
+ switch (cmd) {
+ case HDIO_GET_32BIT: val = &drive->io_32bit; goto read_val;
+ case HDIO_GET_KEEPSETTINGS: val = &drive->keep_settings; goto read_val;
+ case HDIO_GET_UNMASKINTR: val = &drive->unmask; goto read_val;
+ case HDIO_GET_DMA: val = &drive->using_dma; goto read_val;
+ case HDIO_SET_32BIT: setfunc = set_io_32bit; goto set_val;
+ case HDIO_SET_KEEPSETTINGS: setfunc = set_ksettings; goto set_val;
+ case HDIO_SET_PIO_MODE: setfunc = set_pio_mode; goto set_val;
+ case HDIO_SET_UNMASKINTR: setfunc = set_unmaskirq; goto set_val;
+ case HDIO_SET_DMA: setfunc = set_using_dma; goto set_val;
}
- up(&ide_setting_sem);
switch (cmd) {
case HDIO_OBSOLETE_IDENTITY:
@@ -1359,7 +1104,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
ide_init_hwif_ports(&hw, (unsigned long) args[0],
(unsigned long) args[1], NULL);
hw.irq = args[2];
- if (ide_register_hw(&hw, NULL) == -1)
+ if (ide_register_hw(&hw, 0, NULL) == -1)
return -EIO;
return 0;
}
@@ -1434,6 +1179,28 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
default:
return -EINVAL;
}
+
+read_val:
+ down(&ide_setting_sem);
+ spin_lock_irqsave(&ide_lock, flags);
+ err = *val;
+ spin_unlock_irqrestore(&ide_lock, flags);
+ up(&ide_setting_sem);
+ return err >= 0 ? put_user(err, (long __user *)arg) : err;
+
+set_val:
+ if (bdev != bdev->bd_contains)
+ err = -EINVAL;
+ else {
+ if (!capable(CAP_SYS_ADMIN))
+ err = -EACCES;
+ else {
+ down(&ide_setting_sem);
+ err = setfunc(drive, arg);
+ up(&ide_setting_sem);
+ }
+ }
+ return err;
}
EXPORT_SYMBOL(generic_ide_ioctl);
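
A userspace sketch of the HDIO settings ioctls now routed through the switch above (error handling trimmed; /dev/hda is just an example node).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	long val;
	int fd = open("/dev/hda", O_RDONLY);

	ioctl(fd, HDIO_GET_32BIT, &val);	/* read_val path */
	printf("io_32bit = %ld\n", val);

	ioctl(fd, HDIO_SET_32BIT, 1L);		/* set_val path; needs CAP_SYS_ADMIN */

	return 0;
}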
@@ -1566,13 +1333,13 @@ static int __init ide_setup(char *s)
return 1;
}
-#ifdef CONFIG_BLK_DEV_IDEPCI
+#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
if (!strcmp(s, "ide=reverse")) {
ide_scan_direction = 1;
printk(" : Enabled support for IDE inverse scan order.\n");
return 1;
}
-#endif /* CONFIG_BLK_DEV_IDEPCI */
+#endif
#ifdef CONFIG_BLK_DEV_IDEACPI
if (!strcmp(s, "ide=noacpi")) {
@@ -1832,9 +1599,9 @@ extern void __init h8300_ide_init(void);
*/
static void __init probe_for_hwifs (void)
{
-#ifdef CONFIG_BLK_DEV_IDEPCI
+#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
ide_scan_pcibus(ide_scan_direction);
-#endif /* CONFIG_BLK_DEV_IDEPCI */
+#endif
#ifdef CONFIG_ETRAX_IDE
{
@@ -1892,54 +1659,6 @@ static void __init probe_for_hwifs (void)
#endif
}
-void ide_register_subdriver(ide_drive_t *drive, ide_driver_t *driver)
-{
-#ifdef CONFIG_PROC_FS
- ide_add_proc_entries(drive->proc, driver->proc, drive);
-#endif
-}
-
-EXPORT_SYMBOL(ide_register_subdriver);
-
-/**
- * ide_unregister_subdriver - disconnect drive from driver
- * @drive: drive to unplug
- * @driver: driver
- *
- * Disconnect a drive from the driver it was attached to and then
- * clean up the various proc files and other objects attached to it.
- *
- * Takes ide_setting_sem and ide_lock.
- * Caller must hold none of the locks.
- */
-
-void ide_unregister_subdriver(ide_drive_t *drive, ide_driver_t *driver)
-{
- unsigned long flags;
-
-#ifdef CONFIG_PROC_FS
- ide_remove_proc_entries(drive->proc, driver->proc);
-#endif
- down(&ide_setting_sem);
- spin_lock_irqsave(&ide_lock, flags);
- /*
- * ide_setting_sem protects the settings list
- * ide_lock protects the use of settings
- *
- * so we need to hold both, ide_settings_sem because we want to
- * modify the settings list, and ide_lock because we cannot take
- * a setting out that is being used.
- *
- * OTOH both ide_{read,write}_setting are only ever used under
- * ide_setting_sem.
- */
- auto_remove_settings(drive);
- spin_unlock_irqrestore(&ide_lock, flags);
- up(&ide_setting_sem);
-}
-
-EXPORT_SYMBOL(ide_unregister_subdriver);
-
/*
* Probe module
*/
@@ -2071,9 +1790,7 @@ static int __init ide_init(void)
init_ide_data();
-#ifdef CONFIG_PROC_FS
- proc_ide_root = proc_mkdir("ide", NULL);
-#endif
+ proc_ide_create();
#ifdef CONFIG_BLK_DEV_ALI14XX
if (probe_ali14xx)
@@ -2096,14 +1813,9 @@ static int __init ide_init(void)
(void)qd65xx_init();
#endif
- initializing = 1;
/* Probe for special PCI and other "known" interface chipsets. */
probe_for_hwifs();
- initializing = 0;
-#ifdef CONFIG_PROC_FS
- proc_ide_create();
-#endif
return 0;
}
@@ -2143,9 +1855,7 @@ void __exit cleanup_module (void)
pnpide_exit();
#endif
-#ifdef CONFIG_PROC_FS
proc_ide_destroy();
-#endif
bus_unregister(&ide_bus_type);
}
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index 91961aa0304..df17ed68c0b 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -223,7 +223,8 @@ static int __init ali14xx_probe(void)
probe_hwif_init(hwif);
probe_hwif_init(mate);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
+ ide_proc_register_port(mate);
return 0;
}
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 1ed224a01f7..101aee1711c 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -213,7 +213,7 @@ fail_base2:
IRQ_AMIGA_PORTS);
}
- index = ide_register_hw(&hw, &hwif);
+ index = ide_register_hw(&hw, 1, &hwif);
if (index != -1) {
hwif->mmio = 1;
printk("ide%d: ", index);
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index 0219ffa64db..36a3f0ac616 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -138,7 +138,8 @@ static int __init dtc2278_probe(void)
probe_hwif_init(hwif);
probe_hwif_init(mate);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
+ ide_proc_register_port(mate);
return 0;
}
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index a9f2cd5bb81..e1e9d9d6893 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -70,7 +70,7 @@ void __init falconide_init(void)
0, 0, NULL,
// falconide_iops,
IRQ_MFP_IDE);
- index = ide_register_hw(&hw, NULL);
+ index = ide_register_hw(&hw, 1, NULL);
if (index != -1)
printk("ide%d: Falcon IDE interface\n", index);
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index dcfadbbf55d..0830a021bbb 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -165,7 +165,7 @@ found:
// &gayle_iops,
IRQ_AMIGA_PORTS);
- index = ide_register_hw(&hw, &hwif);
+ index = ide_register_hw(&hw, 1, &hwif);
if (index != -1) {
hwif->mmio = 1;
switch (i) {
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index a2832643c52..c8f353b1296 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -357,7 +357,8 @@ int __init ht6560b_init(void)
probe_hwif_init(hwif);
probe_hwif_init(mate);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
+ ide_proc_register_port(mate);
return 0;
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index c6522a64d7e..2f3977f195b 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -153,7 +153,7 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq
hw.irq = irq;
hw.chipset = ide_pci;
hw.dev = &handle->dev;
- return ide_register_hw_with_fixup(&hw, NULL, ide_undecoded_slave);
+ return ide_register_hw_with_fixup(&hw, 0, NULL, ide_undecoded_slave);
}
/*======================================================================
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 4c0079ad52a..c211fc78345 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -102,21 +102,21 @@ void macide_init(void)
0, 0, macide_ack_intr,
// quadra_ide_iops,
IRQ_NUBUS_F);
- index = ide_register_hw(&hw, &hwif);
+ index = ide_register_hw(&hw, 1, &hwif);
break;
case MAC_IDE_PB:
ide_setup_ports(&hw, IDE_BASE, macide_offsets,
0, 0, macide_ack_intr,
// macide_pb_iops,
IRQ_NUBUS_C);
- index = ide_register_hw(&hw, &hwif);
+ index = ide_register_hw(&hw, 1, &hwif);
break;
case MAC_IDE_BABOON:
ide_setup_ports(&hw, BABOON_BASE, macide_offsets,
0, 0, NULL,
// macide_baboon_iops,
IRQ_BABOON_1);
- index = ide_register_hw(&hw, &hwif);
+ index = ide_register_hw(&hw, 1, &hwif);
if (index == -1) break;
if (macintosh_config->ident == MAC_MODEL_PB190) {
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 74f08124eab..e628a983ce3 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -142,7 +142,7 @@ void q40ide_init(void)
0, NULL,
// m68kide_iops,
q40ide_default_irq(pcide_bases[i]));
- index = ide_register_hw(&hw, &hwif);
+ index = ide_register_hw(&hw, 1, &hwif);
// **FIXME**
if (index != -1)
hwif->mmio = 1;
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 2fb8f50f129..d1414a75b52 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -427,7 +427,7 @@ static int __init qd_probe(int base)
qd_setup(hwif, base, config, QD6500_DEF_DATA, QD6500_DEF_DATA,
&qd6500_tune_drive);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
return 1;
}
@@ -459,7 +459,7 @@ static int __init qd_probe(int base)
&qd6580_tune_drive);
qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
return 1;
} else {
@@ -479,7 +479,8 @@ static int __init qd_probe(int base)
&qd6580_tune_drive);
qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
+ ide_proc_register_port(mate);
return 0; /* no other qd65xx possible */
}
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index ca797445557..ddc403a0bd8 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -160,7 +160,8 @@ static int __init umc8672_probe(void)
probe_hwif_init(hwif);
probe_hwif_init(mate);
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
+ ide_proc_register_port(mate);
return 0;
}
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index d54d9fe92a7..ca95e990862 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -760,6 +760,9 @@ static int au_ide_probe(struct device *dev)
#endif
probe_hwif_init(hwif);
+
+ ide_proc_register_port(hwif);
+
dev_set_drvdata(dev, hwif);
printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 81fa06851b2..6e935d7c63f 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -129,6 +129,9 @@ static int __devinit swarm_ide_probe(struct device *dev)
hwif->irq = hwif->hw.irq;
probe_hwif_init(hwif);
+
+ ide_proc_register_port(hwif);
+
dev_set_drvdata(dev, hwif);
return 0;
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index 73bdf64dbbf..b173bc66ce1 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -87,38 +87,12 @@ static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entr
return chipset_table->ultra_settings;
}
-static u8 aec62xx_ratemask (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = HWIF(drive);
- u8 mode;
-
- switch(hwif->pci_dev->device) {
- case PCI_DEVICE_ID_ARTOP_ATP865:
- case PCI_DEVICE_ID_ARTOP_ATP865R:
- mode = (inb(hwif->channel ?
- hwif->mate->dma_status :
- hwif->dma_status) & 0x10) ? 4 : 3;
- break;
- case PCI_DEVICE_ID_ARTOP_ATP860:
- case PCI_DEVICE_ID_ARTOP_ATP860R:
- mode = 2;
- break;
- case PCI_DEVICE_ID_ARTOP_ATP850UF:
- default:
- return 1;
- }
-
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
-}
-
static int aec6210_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
u16 d_conf = 0;
- u8 speed = ide_rate_filter(aec62xx_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
u8 ultra = 0, ultra_conf = 0;
u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
unsigned long flags;
@@ -145,7 +119,7 @@ static int aec6260_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
- u8 speed = ide_rate_filter(aec62xx_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
u8 unit = (drive->select.b.unit & 0x01);
u8 tmp1 = 0, tmp2 = 0;
u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
@@ -181,17 +155,6 @@ static int aec62xx_tune_chipset (ide_drive_t *drive, u8 speed)
}
}
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, aec62xx_ratemask(drive));
-
- if (!(speed))
- return 0;
-
- (void) aec62xx_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio)
{
pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
@@ -200,7 +163,7 @@ static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio)
static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive)
{
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
@@ -261,11 +224,13 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const ch
static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
{
+ struct pci_dev *dev = hwif->pci_dev;
+
hwif->autodma = 0;
hwif->tuneproc = &aec62xx_tune_drive;
hwif->speedproc = &aec62xx_tune_chipset;
- if (hwif->pci_dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF)
+ if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF)
hwif->serialized = hwif->channel;
if (hwif->mate)
@@ -277,7 +242,15 @@ static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
return;
}
- hwif->ultra_mask = 0x7f;
+ hwif->ultra_mask = hwif->cds->udma_mask;
+
+ /* atp865 and atp865r */
+ if (hwif->ultra_mask == 0x3f) {
+ /* check bit 0x10 of DMA status register */
+ if (inb(pci_resource_start(dev, 4) + 2) & 0x10)
+ hwif->ultra_mask = 0x7f; /* udma0-6 */
+ }
+
hwif->mwdma_mask = 0x07;
hwif->ide_dma_check = &aec62xx_config_drive_xfer_rate;
@@ -344,6 +317,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.bootable = OFF_BOARD,
+ .udma_mask = 0x07, /* udma0-2 */
},{ /* 1 */
.name = "AEC6260",
.init_setup = init_setup_aec62xx,
@@ -353,6 +327,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
.channels = 2,
.autodma = NOAUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x1f, /* udma0-4 */
},{ /* 2 */
.name = "AEC6260R",
.init_setup = init_setup_aec62xx,
@@ -363,6 +338,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.bootable = NEVER_BOARD,
+ .udma_mask = 0x1f, /* udma0-4 */
},{ /* 3 */
.name = "AEC6X80",
.init_setup = init_setup_aec6x80,
@@ -372,6 +348,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x3f, /* udma0-5 */
},{ /* 4 */
.name = "AEC6X80R",
.init_setup = init_setup_aec6x80,
@@ -382,6 +359,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.bootable = OFF_BOARD,
+ .udma_mask = 0x3f, /* udma0-5 */
}
};
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 946a12746cb..428efdae0c7 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -50,7 +50,7 @@ static u8 m5229_revision;
static u8 chip_is_1543c_e;
static struct pci_dev *isa_dev;
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS)
+#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
#include <linux/stat.h>
#include <linux/proc_fs.h>
@@ -278,7 +278,7 @@ static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
return p-buffer; /* => must be less than 4k! */
}
-#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */
+#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
/**
* ali15x3_tune_pio - set up chipset for PIO mode
@@ -378,74 +378,31 @@ static void ali15x3_tune_drive (ide_drive_t *drive, u8 pio)
}
/**
- * ali15x3_can_ultra - check for ultra DMA support
- * @drive: drive to do the check
+ * ali_udma_filter - compute UDMA mask
+ * @drive: IDE device
*
- * Check the drive and controller revisions. Return 0 if UDMA is
- * not available, or 1 if UDMA can be used. The actual rules for
- * the ALi are
+ * Return available UDMA modes.
+ *
+ * The actual rules for the ALi are:
* No UDMA on revisions <= 0x20
* Disk only for revisions < 0xC2
* Not WDC drives for revisions < 0xC2
*
* FIXME: WDC ifdef needs to die
*/
-
-static u8 ali15x3_can_ultra (ide_drive_t *drive)
-{
-#ifndef CONFIG_WDC_ALI15X3
- struct hd_driveid *id = drive->id;
-#endif /* CONFIG_WDC_ALI15X3 */
- if (m5229_revision <= 0x20) {
- return 0;
- } else if ((m5229_revision < 0xC2) &&
-#ifndef CONFIG_WDC_ALI15X3
- ((chip_is_1543c_e && strstr(id->model, "WDC ")) ||
- (drive->media!=ide_disk))) {
-#else /* CONFIG_WDC_ALI15X3 */
- (drive->media!=ide_disk)) {
-#endif /* CONFIG_WDC_ALI15X3 */
- return 0;
- } else {
- return 1;
- }
-}
-
-/**
- * ali15x3_ratemask - generate DMA mode list
- * @drive: drive to compute against
- *
- * Generate a list of the available DMA modes for the drive.
- * FIXME: this function contains lots of bogus masking we can dump
- *
- * Return the highest available mode (UDMA33, UDMA66, UDMA100,..)
- */
-
-static u8 ali15x3_ratemask (ide_drive_t *drive)
+static u8 ali_udma_filter(ide_drive_t *drive)
{
- u8 mode = 0, can_ultra = ali15x3_can_ultra(drive);
-
- if (m5229_revision > 0xC4 && can_ultra) {
- mode = 4;
- } else if (m5229_revision == 0xC4 && can_ultra) {
- mode = 3;
- } else if (m5229_revision >= 0xC2 && can_ultra) {
- mode = 2;
- } else if (can_ultra) {
- return 1;
- } else {
- return 0;
+ if (m5229_revision > 0x20 && m5229_revision < 0xC2) {
+ if (drive->media != ide_disk)
+ return 0;
+#ifndef CONFIG_WDC_ALI15X3
+ if (chip_is_1543c_e && strstr(drive->id->model, "WDC "))
+ return 0;
+#endif
}
- /*
- * If the drive sees no suitable cable then UDMA 33
- * is the highest permitted mode
- */
-
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
+ return drive->hwif->ultra_mask;
}
/**
@@ -461,7 +418,7 @@ static int ali15x3_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
- u8 speed = ide_rate_filter(ali15x3_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
u8 speed1 = speed;
u8 unit = (drive->select.b.unit & 0x01);
u8 tmpbyte = 0x00;
@@ -511,7 +468,7 @@ static int ali15x3_tune_chipset (ide_drive_t *drive, u8 xferspeed)
static int config_chipset_for_dma (ide_drive_t *drive)
{
- u8 speed = ide_dma_speed(drive, ali15x3_ratemask(drive));
+ u8 speed = ide_max_dma_mode(drive);
if (!(speed))
return 0;
@@ -609,13 +566,13 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS)
+#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
if (!ali_proc) {
ali_proc = 1;
bmide_dev = dev;
ide_pci_create_host_proc("ali", ali_get_info);
}
-#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */
+#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
local_irq_save(flags);
@@ -771,6 +728,7 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
hwif->autodma = 0;
hwif->tuneproc = &ali15x3_tune_drive;
hwif->speedproc = &ali15x3_tune_chipset;
+ hwif->udma_filter = &ali_udma_filter;
/* don't use LBA48 DMA on ALi devices before rev 0xC5 */
hwif->no_lba48_dma = (m5229_revision <= 0xC4) ? 1 : 0;
@@ -783,8 +741,17 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
hwif->atapi_dma = 1;
- if (m5229_revision > 0x20)
- hwif->ultra_mask = 0x7f;
+ if (m5229_revision <= 0x20)
+ hwif->ultra_mask = 0x00; /* no udma */
+ else if (m5229_revision < 0xC2)
+ hwif->ultra_mask = 0x07; /* udma0-2 */
+ else if (m5229_revision == 0xC2 || m5229_revision == 0xC3)
+ hwif->ultra_mask = 0x1f; /* udma0-4 */
+ else if (m5229_revision == 0xC4)
+ hwif->ultra_mask = 0x3f; /* udma0-5 */
+ else
+ hwif->ultra_mask = 0x7f; /* udma0-6 */
+
hwif->mwdma_mask = 0x07;
hwif->swdma_mask = 0x07;
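
With drivers now setting hwif->udma_filter and hwif->ultra_mask instead of passing a precomputed mode, ide_rate_filter(drive, speed) can derive the allowed UDMA set itself. A plausible core-side sketch, assuming the hook falls back to ultra_mask when unset and that a disallowed UDMA rate degrades to the next permitted one (a sketch of the idea, not the actual ide-lib code):

	/* Assumes <linux/ide.h> context. */
	static u8 hwif_udma_mask(ide_drive_t *drive)
	{
		ide_hwif_t *hwif = drive->hwif;

		return hwif->udma_filter ? hwif->udma_filter(drive)
					 : hwif->ultra_mask;
	}

	static u8 clamp_udma(ide_drive_t *drive, u8 speed)
	{
		u8 mask = hwif_udma_mask(drive);

		if (speed < XFER_UDMA_0)
			return speed;	/* not a UDMA rate, nothing to clamp */

		while (speed > XFER_UDMA_0 &&
		       !(mask & (1 << (speed - XFER_UDMA_0))))
			speed--;

		if (!(mask & (1 << (speed - XFER_UDMA_0))))
			speed = XFER_MW_DMA_2;	/* assumption: best non-UDMA fallback */

		return speed;
	}
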
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 7989bdd842a..becb1a5648b 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -92,7 +92,7 @@ static unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3
* AMD /proc entry.
*/
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IDE_PROC_FS
#include <linux/stat.h>
#include <linux/proc_fs.h>
@@ -402,14 +402,14 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
* Register /proc/ide/amd74xx entry
*/
-#if defined(DISPLAY_AMD_TIMINGS) && defined(CONFIG_PROC_FS)
+#if defined(DISPLAY_AMD_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
if (!amd74xx_proc) {
amd_base = pci_resource_start(dev, 4);
bmide_dev = dev;
ide_pci_create_host_proc("amd74xx", amd74xx_get_info);
amd74xx_proc = 1;
}
-#endif /* DISPLAY_AMD_TIMINGS && CONFIG_PROC_FS */
+#endif /* DISPLAY_AMD_TIMINGS && CONFIG_IDE_PROC_FS */
return dev->irq;
}
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 2d48af32e3f..0e52ad722a7 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -49,22 +49,6 @@ static int save_mdma_mode[4];
static DEFINE_SPINLOCK(atiixp_lock);
/**
- * atiixp_ratemask - compute rate mask for ATIIXP IDE
- * @drive: IDE drive to compute for
- *
- * Returns the available modes for the ATIIXP IDE controller.
- */
-
-static u8 atiixp_ratemask(ide_drive_t *drive)
-{
- u8 mode = 3;
-
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
-}
-
-/**
* atiixp_dma_2_pio - return the PIO mode matching DMA
* @xfer_rate: transfer speed
*
@@ -189,7 +173,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
u16 tmp16;
u8 speed, pio;
- speed = ide_rate_filter(atiixp_ratemask(drive), xferspeed);
+ speed = ide_rate_filter(drive, xferspeed);
spin_lock_irqsave(&atiixp_lock, flags);
@@ -223,26 +207,6 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
}
/**
- * atiixp_config_drive_for_dma - configure drive for DMA
- * @drive: IDE drive to configure
- *
- * Set up a ATIIXP interface channel for the best available speed.
- * We prefer UDMA if it is available and then MWDMA. If DMA is
- * not available we switch to PIO and return 0.
- */
-
-static int atiixp_config_drive_for_dma(ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, atiixp_ratemask(drive));
-
- if (!speed)
- return 0;
-
- (void) atiixp_speedproc(drive, speed);
- return ide_dma_enable(drive);
-}
-
-/**
* atiixp_dma_check - set up an IDE device
* @drive: IDE drive to configure
*
@@ -256,7 +220,7 @@ static int atiixp_dma_check(ide_drive_t *drive)
drive->init_speed = 0;
- if (ide_use_dma(drive) && atiixp_config_drive_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive)) {
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 77f51ab6d43..61ea96b5555 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -74,7 +74,7 @@
#define UDIDETCR1 0x7B
#define DTPR1 0x7C
-#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS)
+#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
#include <linux/stat.h>
#include <linux/proc_fs.h>
@@ -165,7 +165,7 @@ static int cmd64x_get_info (char *buffer, char **addr, off_t offset, int count)
return p-buffer; /* => must be less than 4k! */
}
-#endif /* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS) */
+#endif /* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
static u8 quantize_timing(int timing, int quant)
{
@@ -292,55 +292,6 @@ static void cmd64x_tune_drive (ide_drive_t *drive, u8 pio)
(void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
}
-static u8 cmd64x_ratemask (ide_drive_t *drive)
-{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
- u8 mode = 0;
-
- switch(dev->device) {
- case PCI_DEVICE_ID_CMD_649:
- mode = 3;
- break;
- case PCI_DEVICE_ID_CMD_648:
- mode = 2;
- break;
- case PCI_DEVICE_ID_CMD_643:
- return 0;
-
- case PCI_DEVICE_ID_CMD_646:
- {
- unsigned int class_rev = 0;
- pci_read_config_dword(dev,
- PCI_CLASS_REVISION, &class_rev);
- class_rev &= 0xff;
- /*
- * UltraDMA only supported on PCI646U and PCI646U2, which
- * correspond to revisions 0x03, 0x05 and 0x07 respectively.
- * Actually, although the CMD tech support people won't
- * tell me the details, the 0x03 revision cannot support
- * UDMA correctly without hardware modifications, and even
- * then it only works with Quantum disks due to some
- * hold time assumptions in the 646U part which are fixed
- * in the 646U2.
- *
- * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
- */
- switch(class_rev) {
- case 0x07:
- case 0x05:
- return 1;
- case 0x03:
- case 0x01:
- default:
- return 0;
- }
- }
- }
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
-}
-
static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed)
{
ide_hwif_t *hwif = HWIF(drive);
@@ -348,7 +299,7 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed)
u8 unit = drive->dn & 0x01;
u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0;
- speed = ide_rate_filter(cmd64x_ratemask(drive), speed);
+ speed = ide_rate_filter(drive, speed);
if (speed >= XFER_SW_DMA_0) {
(void) pci_read_config_byte(dev, pciU, &regU);
@@ -403,7 +354,7 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed)
static int config_chipset_for_dma (ide_drive_t *drive)
{
- u8 speed = ide_dma_speed(drive, cmd64x_ratemask(drive));
+ u8 speed = ide_max_dma_mode(drive);
if (!speed)
return 0;
@@ -597,7 +548,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
(void) pci_write_config_byte(dev, UDIDETCR0, 0xf0);
#endif /* CONFIG_PPC */
-#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS)
+#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
cmd_devs[n_cmd_devs++] = dev;
@@ -605,7 +556,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
cmd64x_proc = 1;
ide_pci_create_host_proc("cmd64x", cmd64x_get_info);
}
-#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_PROC_FS */
+#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_IDE_PROC_FS */
return 0;
}
@@ -644,15 +595,24 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
hwif->atapi_dma = 1;
- hwif->ultra_mask = 0x3f;
- hwif->mwdma_mask = 0x07;
+ hwif->ultra_mask = hwif->cds->udma_mask;
+
+ /*
+ * UltraDMA only supported on PCI646U and PCI646U2, which
+ * correspond to revisions 0x03, 0x05 and 0x07 respectively.
+ * Actually, although the CMD tech support people won't
+ * tell me the details, the 0x03 revision cannot support
+ * UDMA correctly without hardware modifications, and even
+ * then it only works with Quantum disks due to some
+ * hold time assumptions in the 646U part which are fixed
+ * in the 646U2.
+ *
+ * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
+ */
+ if (dev->device == PCI_DEVICE_ID_CMD_646 && class_rev < 5)
+ hwif->ultra_mask = 0x00;
- if (dev->device == PCI_DEVICE_ID_CMD_643)
- hwif->ultra_mask = 0x80;
- if (dev->device == PCI_DEVICE_ID_CMD_646)
- hwif->ultra_mask = (class_rev > 0x04) ? 0x07 : 0x80;
- if (dev->device == PCI_DEVICE_ID_CMD_648)
- hwif->ultra_mask = 0x1f;
+ hwif->mwdma_mask = 0x07;
hwif->ide_dma_check = &cmd64x_config_drive_for_dma;
if (!(hwif->udma_four))
@@ -716,6 +676,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
.bootable = ON_BOARD,
+ .udma_mask = 0x00, /* no udma */
},{ /* 1 */
.name = "CMD646",
.init_setup = init_setup_cmd646,
@@ -725,6 +686,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.bootable = ON_BOARD,
+ .udma_mask = 0x07, /* udma0-2 */
},{ /* 2 */
.name = "CMD648",
.init_setup = init_setup_cmd64x,
@@ -734,6 +696,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.bootable = ON_BOARD,
+ .udma_mask = 0x1f, /* udma0-4 */
},{ /* 3 */
.name = "CMD649",
.init_setup = init_setup_cmd64x,
@@ -743,6 +706,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.bootable = ON_BOARD,
+ .udma_mask = 0x3f, /* udma0-5 */
}
};
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 400859a839f..3b88a3a5611 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -213,6 +213,7 @@ static ide_pci_device_t cyrix_chipsets[] __devinitdata = {
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
+ ide_hwif_t *hwif = NULL, *mate = NULL;
ata_index_t index;
ide_pci_device_t *d = &cyrix_chipsets[id->driver_data];
@@ -239,10 +240,21 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
ide_pci_setup_ports(dev, d, 14, &index);
- if((index.b.low & 0xf0) != 0xf0)
- probe_hwif_init(&ide_hwifs[index.b.low]);
- if((index.b.high & 0xf0) != 0xf0)
- probe_hwif_init(&ide_hwifs[index.b.high]);
+ if ((index.b.low & 0xf0) != 0xf0)
+ hwif = &ide_hwifs[index.b.low];
+ if ((index.b.high & 0xf0) != 0xf0)
+ mate = &ide_hwifs[index.b.high];
+
+ if (hwif)
+ probe_hwif_init(hwif);
+ if (mate)
+ probe_hwif_init(mate);
+
+ if (hwif)
+ ide_proc_register_port(hwif);
+ if (mate)
+ ide_proc_register_port(mate);
+
return 0;
}
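
probe_hwif_init() no longer registers the interface with /proc as a side effect; ide_proc_register_port() is a separate, explicit step. A sketch of the resulting two-phase pattern for a port pair (probing both before registering either presumably keeps /proc from exposing a half-set-up pair):

	ide_hwif_t *ports[] = { hwif, mate };	/* as named in this hunk */
	int i;

	for (i = 0; i < 2; i++)
		if (ports[i])
			probe_hwif_init(ports[i]);

	for (i = 0; i < 2; i++)
		if (ports[i])
			ide_proc_register_port(ports[i]);
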
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index 45f43efbf92..41925c47ef0 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -127,20 +127,6 @@ static void cs5535_set_speed(ide_drive_t *drive, u8 speed)
}
}
-static u8 cs5535_ratemask(ide_drive_t *drive)
-{
- /* eighty93 will return 1 if it's 80core and capable of
- exceeding udma2, 0 otherwise. we need ratemask to set
- the max speed and if we can > udma2 then we return 2
- which selects speed_max as udma4 which is the 5535's max
- speed, and 1 selects udma2 which is the max for 40c */
- if (!eighty_ninty_three(drive))
- return 1;
-
- return 2;
-}
-
-
/****
* cs5535_set_drive - Configure the drive to the new speed
* @drive: Drive to set up
@@ -151,7 +137,7 @@ static u8 cs5535_ratemask(ide_drive_t *drive)
*/
static int cs5535_set_drive(ide_drive_t *drive, u8 speed)
{
- speed = ide_rate_filter(cs5535_ratemask(drive), speed);
+ speed = ide_rate_filter(drive, speed);
ide_config_drive_speed(drive, speed);
cs5535_set_speed(drive, speed);
@@ -178,28 +164,13 @@ static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed)
cs5535_set_speed(drive, xferspeed);
}
-static int cs5535_config_drive_for_dma(ide_drive_t *drive)
-{
- u8 speed;
-
- speed = ide_dma_speed(drive, cs5535_ratemask(drive));
-
- /* If no DMA speed was available then let dma_check hit pio */
- if (!speed) {
- return 0;
- }
-
- cs5535_set_drive(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int cs5535_dma_check(ide_drive_t *drive)
{
u8 speed;
drive->init_speed = 0;
- if (ide_use_dma(drive) && cs5535_config_drive_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive)) {
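
ide_tune_dma() collapses the ide_use_dma() + config_chipset_for_dma() pair that every driver here open-coded. A rough equivalent of what the removed helpers did, assuming ->speedproc points at the driver's tune/set routine (sketch only; the real ide_tune_dma lives in the IDE core):

	/* Assumes <linux/ide.h> context. */
	static int old_style_tune_dma(ide_drive_t *drive)
	{
		u8 speed;

		if (!ide_use_dma(drive))
			return 0;

		speed = ide_max_dma_mode(drive); /* was ide_dma_speed(drive, ratemask) */
		if (!speed)
			return 0;

		drive->hwif->speedproc(drive, speed);
		return ide_dma_enable(drive);
	}
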
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index dd7ec37fdea..46f4a888c03 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -80,7 +80,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
hw.irq = dev->irq;
hw.chipset = ide_pci; /* this enables IRQ sharing */
- rc = ide_register_hw_with_fixup(&hw, &hwif, ide_undecoded_slave);
+ rc = ide_register_hw_with_fixup(&hw, 0, &hwif, ide_undecoded_slave);
if (rc < 0) {
printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc);
pci_disable_device(dev);
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 924eaa3a570..2c24c3de884 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -43,15 +43,10 @@
#define HPT343_DEBUG_DRIVE_INFO 0
-static u8 hpt34x_ratemask (ide_drive_t *drive)
-{
- return 1;
-}
-
static int hpt34x_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
struct pci_dev *dev = HWIF(drive)->pci_dev;
- u8 speed = ide_rate_filter(hpt34x_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
u32 reg1= 0, tmp1 = 0, reg2 = 0, tmp2 = 0;
u8 hi_speed, lo_speed;
@@ -89,29 +84,11 @@ static void hpt34x_tune_drive (ide_drive_t *drive, u8 pio)
(void) hpt34x_tune_chipset(drive, (XFER_PIO_0 + pio));
}
-/*
- * This allows the configuration of ide_pci chipset registers
- * for cards that learn about the drive's UDMA, DMA, PIO capabilities
- * after the drive is reported by the OS. Initially for designed for
- * HPT343 UDMA chipset by HighPoint|Triones Technologies, Inc.
- */
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, hpt34x_ratemask(drive));
-
- if (!(speed))
- return 0;
-
- (void) hpt34x_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int hpt34x_config_drive_xfer_rate (ide_drive_t *drive)
{
drive->init_speed = 0;
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
#ifndef CONFIG_HPT34X_AUTODMA
return -1;
#else
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index cf9d344d19f..fcbc5605b38 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -514,43 +514,31 @@ static int check_in_drive_list(ide_drive_t *drive, const char **list)
return 0;
}
-static u8 hpt3xx_ratemask(ide_drive_t *drive)
-{
- struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
- u8 mode = info->max_mode;
-
- if (!eighty_ninty_three(drive) && mode)
- mode = min(mode, (u8)1);
- return mode;
-}
-
/*
* Note for the future; the SATA hpt37x we must set
* either PIO or UDMA modes 0,4,5
*/
-
-static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed)
+
+static u8 hpt3xx_udma_filter(ide_drive_t *drive)
{
struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
u8 chip_type = info->chip_type;
- u8 mode = hpt3xx_ratemask(drive);
-
- if (drive->media != ide_disk)
- return min(speed, (u8)XFER_PIO_4);
+ u8 mode = info->max_mode;
+ u8 mask;
switch (mode) {
case 0x04:
- speed = min_t(u8, speed, XFER_UDMA_6);
+ mask = 0x7f;
break;
case 0x03:
- speed = min_t(u8, speed, XFER_UDMA_5);
+ mask = 0x3f;
if (chip_type >= HPT374)
break;
if (!check_in_drive_list(drive, bad_ata100_5))
goto check_bad_ata33;
/* fall thru */
case 0x02:
- speed = min_t(u8, speed, XFER_UDMA_4);
+ mask = 0x1f;
/*
* CHECK ME, Does this need to be changed to HPT374 ??
@@ -561,13 +549,13 @@ static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed)
!check_in_drive_list(drive, bad_ata66_4))
goto check_bad_ata33;
- speed = min_t(u8, speed, XFER_UDMA_3);
+ mask = 0x0f;
if (HPT366_ALLOW_ATA66_3 &&
!check_in_drive_list(drive, bad_ata66_3))
goto check_bad_ata33;
/* fall thru */
case 0x01:
- speed = min_t(u8, speed, XFER_UDMA_2);
+ mask = 0x07;
check_bad_ata33:
if (chip_type >= HPT370A)
@@ -577,10 +565,10 @@ static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed)
/* fall thru */
case 0x00:
default:
- speed = min_t(u8, speed, XFER_MW_DMA_2);
+ mask = 0x00;
break;
}
- return speed;
+ return mask;
}
static u32 get_speed_setting(u8 speed, struct hpt_info *info)
@@ -608,12 +596,19 @@ static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
struct hpt_info *info = pci_get_drvdata(dev);
- u8 speed = hpt3xx_ratefilter(drive, xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
u8 itr_addr = drive->dn ? 0x44 : 0x40;
- u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 :
- (speed < XFER_UDMA_0 ? 0xc0070000 : 0xc03800ff);
- u32 new_itr = get_speed_setting(speed, info);
u32 old_itr = 0;
+ u32 itr_mask, new_itr;
+
+ /* TODO: move this to ide_rate_filter() [ check ->atapi_dma ] */
+ if (drive->media != ide_disk)
+ speed = min_t(u8, speed, XFER_PIO_4);
+
+ itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 :
+ (speed < XFER_UDMA_0 ? 0xc0070000 : 0xc03800ff);
+
+ new_itr = get_speed_setting(speed, info);
/*
* Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
@@ -633,12 +628,19 @@ static int hpt37x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
struct hpt_info *info = pci_get_drvdata(dev);
- u8 speed = hpt3xx_ratefilter(drive, xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
u8 itr_addr = 0x40 + (drive->dn * 4);
- u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 :
- (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff);
- u32 new_itr = get_speed_setting(speed, info);
u32 old_itr = 0;
+ u32 itr_mask, new_itr;
+
+ /* TODO: move this to ide_rate_filter() [ check ->atapi_dma ] */
+ if (drive->media != ide_disk)
+ speed = min_t(u8, speed, XFER_PIO_4);
+
+ itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 :
+ (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff);
+
+ new_itr = get_speed_setting(speed, info);
pci_read_config_dword(dev, itr_addr, &old_itr);
new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask);
@@ -667,24 +669,6 @@ static void hpt3xx_tune_drive(ide_drive_t *drive, u8 pio)
(void) hpt3xx_tune_chipset (drive, XFER_PIO_0 + pio);
}
-/*
- * This allows the configuration of ide_pci chipset registers
- * for cards that learn about the drive's UDMA, DMA, PIO capabilities
- * after the drive is reported by the OS. Initially designed for
- * HPT366 UDMA chipset by HighPoint|Triones Technologies, Inc.
- *
- */
-static int config_chipset_for_dma(ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive));
-
- if (!speed)
- return 0;
-
- (void) hpt3xx_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int hpt3xx_quirkproc(ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
@@ -739,7 +723,7 @@ static int hpt366_config_drive_xfer_rate(ide_drive_t *drive)
{
drive->init_speed = 0;
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
@@ -1271,6 +1255,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
hwif->intrproc = &hpt3xx_intrproc;
hwif->maskproc = &hpt3xx_maskproc;
hwif->busproc = &hpt3xx_busproc;
+ hwif->udma_filter = &hpt3xx_udma_filter;
/*
* HPT3xxN chips have some complications:
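
Because hpt3xx_udma_filter() returns a capability mask rather than clamping a transfer-rate byte, a caller can test any single mode with one shift and AND. For instance, under the usual XFER_UDMA_* encoding (helper name illustrative):

	/* Assumes <linux/ide.h> context. */
	static inline int udma_mode_allowed(u8 mask, u8 speed)
	{
		if (speed < XFER_UDMA_0 || speed > XFER_UDMA_6)
			return 0;
		return (mask >> (speed - XFER_UDMA_0)) & 1;
	}
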
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index 424f00bb160..c04a02687b9 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -17,22 +17,6 @@
#include <asm/io.h>
-/*
- * it8213_ratemask - Compute available modes
- * @drive: IDE drive
- *
- * Compute the available speeds for the devices on the interface. This
- * is all modes to ATA133 clipped by drive cable setup.
- */
-
-static u8 it8213_ratemask (ide_drive_t *drive)
-{
- u8 mode = 4;
- if (!eighty_ninty_three(drive))
- mode = min_t(u8, mode, 1);
- return mode;
-}
-
/**
* it8213_dma_2_pio - return the PIO mode matching DMA
* @xfer_rate: transfer speed
@@ -145,7 +129,7 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed)
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
u8 maslave = 0x40;
- u8 speed = ide_rate_filter(it8213_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
int a_speed = 3 << (drive->dn * 4);
int u_flag = 1 << drive->dn;
int v_flag = 0x01 << drive->dn;
@@ -213,25 +197,6 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed)
return ide_config_drive_speed(drive, speed);
}
-/*
- * config_chipset_for_dma - configure for DMA
- * @drive: drive to configure
- *
- * Called by the IDE layer when it wants the timings set up.
- */
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, it8213_ratemask(drive));
-
- if (!speed)
- return 0;
-
- it8213_tune_chipset(drive, speed);
-
- return ide_dma_enable(drive);
-}
-
/**
* it8213_configure_drive_for_dma - set up for DMA transfers
* @drive: drive we are going to set up
@@ -246,7 +211,7 @@ static int it8213_config_drive_for_dma (ide_drive_t *drive)
{
u8 pio;
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
pio = ide_get_best_pio_mode(drive, 255, 4, NULL);
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 4e1254813ee..442f658c6ae 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -229,22 +229,6 @@ static void it821x_clock_strategy(ide_drive_t *drive)
}
/**
- * it821x_ratemask - Compute available modes
- * @drive: IDE drive
- *
- * Compute the available speeds for the devices on the interface. This
- * is all modes to ATA133 clipped by drive cable setup.
- */
-
-static u8 it821x_ratemask (ide_drive_t *drive)
-{
- u8 mode = 4;
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
-}
-
-/**
* it821x_tunepio - tune a drive
* @drive: drive to tune
* @pio: the desired PIO mode
@@ -438,7 +422,7 @@ static int it821x_tune_chipset (ide_drive_t *drive, byte xferspeed)
ide_hwif_t *hwif = drive->hwif;
struct it821x_dev *itdev = ide_get_hwifdata(hwif);
- u8 speed = ide_rate_filter(it821x_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
switch (speed) {
case XFER_PIO_4:
@@ -488,7 +472,7 @@ static int it821x_tune_chipset (ide_drive_t *drive, byte xferspeed)
static int config_chipset_for_dma (ide_drive_t *drive)
{
- u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
+ u8 speed = ide_max_dma_mode(drive);
if (speed == 0)
return 0;
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index be4fc96c29e..76ed2514722 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -22,22 +22,6 @@ typedef enum {
} port_type;
/**
- * jmicron_ratemask - Compute available modes
- * @drive: IDE drive
- *
- * Compute the available speeds for the devices on the interface. This
- * is all modes to ATA133 clipped by drive cable setup.
- */
-
-static u8 jmicron_ratemask(ide_drive_t *drive)
-{
- u8 mode = 4;
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
-}
-
-/**
* ata66_jmicron - Cable check
* @hwif: IDE port
*
@@ -129,32 +113,12 @@ static void config_jmicron_chipset_for_pio (ide_drive_t *drive, byte set_speed)
static int jmicron_tune_chipset (ide_drive_t *drive, byte xferspeed)
{
-
- u8 speed = ide_rate_filter(jmicron_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
return ide_config_drive_speed(drive, speed);
}
/**
- * config_chipset_for_dma - configure for DMA
- * @drive: drive to configure
- *
- * As the JMicron snoops for timings all we actually need to do is
- * make sure we don't set an invalid mode.
- */
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, jmicron_ratemask(drive));
-
- if (!speed)
- return 0;
-
- jmicron_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
-/**
* jmicron_configure_drive_for_dma - set up for DMA transfers
* @drive: drive we are going to set up
*
@@ -164,7 +128,7 @@ static int config_chipset_for_dma (ide_drive_t *drive)
static int jmicron_config_drive_for_dma (ide_drive_t *drive)
{
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
config_jmicron_chipset_for_pio(drive, 1);
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 2cdd629c653..65b1e124edf 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -37,8 +37,6 @@
#include <asm/pci-bridge.h>
#endif
-#define PDC202_DEBUG_CABLE 0
-
#undef DEBUG
#ifdef DEBUG
@@ -82,16 +80,6 @@ static u8 max_dma_rate(struct pci_dev *pdev)
return mode;
}
-static u8 pdcnew_ratemask(ide_drive_t *drive)
-{
- u8 mode = max_dma_rate(HWIF(drive)->pci_dev);
-
- if (!eighty_ninty_three(drive))
- mode = min_t(u8, mode, 1);
-
- return mode;
-}
-
/**
* get_indexed_reg - Get indexed register
* @hwif: for the port address
@@ -164,7 +152,7 @@ static int pdcnew_tune_chipset(ide_drive_t *drive, u8 speed)
u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
int err;
- speed = ide_rate_filter(pdcnew_ratemask(drive), speed);
+ speed = ide_rate_filter(drive, speed);
/*
* Issue SETFEATURES_XFER to the drive first. PDC202xx hardware will
@@ -244,17 +232,8 @@ static int config_chipset_for_dma(ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
ide_hwif_t *hwif = HWIF(drive);
- u8 ultra_66 = (id->dma_ultra & 0x0078) ? 1 : 0;
- u8 cable = pdcnew_cable_detect(hwif);
u8 speed;
- if (ultra_66 && cable) {
- printk(KERN_WARNING "Warning: %s channel "
- "requires an 80-pin cable for operation.\n",
- hwif->channel ? "Secondary" : "Primary");
- printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name);
- }
-
if (id->capability & 4) {
/*
* Set IORDY_EN & PREFETCH_EN (this seems to have
@@ -267,7 +246,7 @@ static int config_chipset_for_dma(ide_drive_t *drive)
set_indexed_reg(hwif, 0x13 + adj, tmp | 0x03);
}
- speed = ide_dma_speed(drive, pdcnew_ratemask(drive));
+ speed = ide_max_dma_mode(drive);
if (!speed)
return 0;
@@ -543,7 +522,8 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
hwif->atapi_dma = 1;
- hwif->ultra_mask = 0x7f;
+
+ hwif->ultra_mask = hwif->cds->udma_mask;
hwif->mwdma_mask = 0x07;
hwif->err_stops_fifo = 1;
@@ -556,11 +536,6 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
if (!noautodma)
hwif->autodma = 1;
hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
-
-#if PDC202_DEBUG_CABLE
- printk(KERN_DEBUG "%s: %s-pin cable\n",
- hwif->name, hwif->udma_four ? "80" : "40");
-#endif /* PDC202_DEBUG_CABLE */
}
static int __devinit init_setup_pdcnew(struct pci_dev *dev, ide_pci_device_t *d)
@@ -619,6 +594,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x3f, /* udma0-5 */
},{ /* 1 */
.name = "PDC20269",
.init_setup = init_setup_pdcnew,
@@ -627,6 +603,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x7f, /* udma0-6 */
},{ /* 2 */
.name = "PDC20270",
.init_setup = init_setup_pdc20270,
@@ -635,6 +612,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x3f, /* udma0-5 */
},{ /* 3 */
.name = "PDC20271",
.init_setup = init_setup_pdcnew,
@@ -643,6 +621,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x7f, /* udma0-6 */
},{ /* 4 */
.name = "PDC20275",
.init_setup = init_setup_pdcnew,
@@ -651,6 +630,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x7f, /* udma0-6 */
},{ /* 5 */
.name = "PDC20276",
.init_setup = init_setup_pdc20276,
@@ -659,6 +639,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x7f, /* udma0-6 */
},{ /* 6 */
.name = "PDC20277",
.init_setup = init_setup_pdcnew,
@@ -667,6 +648,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
.channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .udma_mask = 0x7f, /* udma0-6 */
}
};
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index a7a639fe1ea..7146fe3f6ba 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -46,7 +46,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#define PDC202_DEBUG_CABLE 0
#define PDC202XX_DEBUG_DRIVE_INFO 0
static const char *pdc_quirk_drives[] = {
@@ -101,35 +100,12 @@ static const char *pdc_quirk_drives[] = {
#define MC1 0x02 /* DMA"C" timing */
#define MC0 0x01 /* DMA"C" timing */
-static u8 pdc202xx_ratemask (ide_drive_t *drive)
-{
- u8 mode;
-
- switch(HWIF(drive)->pci_dev->device) {
- case PCI_DEVICE_ID_PROMISE_20267:
- case PCI_DEVICE_ID_PROMISE_20265:
- mode = 3;
- break;
- case PCI_DEVICE_ID_PROMISE_20263:
- case PCI_DEVICE_ID_PROMISE_20262:
- mode = 2;
- break;
- case PCI_DEVICE_ID_PROMISE_20246:
- return 1;
- default:
- return 0;
- }
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
-}
-
static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
u8 drive_pci = 0x60 + (drive->dn << 2);
- u8 speed = ide_rate_filter(pdc202xx_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
u32 drive_conf;
u8 AP, BP, CP, DP;
@@ -261,20 +237,7 @@ static int config_chipset_for_dma (ide_drive_t *drive)
u32 drive_conf = 0;
u8 drive_pci = 0x60 + (drive->dn << 2);
u8 test1 = 0, test2 = 0, speed = -1;
- u8 AP = 0, cable = 0;
-
- u8 ultra_66 = ((id->dma_ultra & 0x0010) ||
- (id->dma_ultra & 0x0008)) ? 1 : 0;
-
- if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
- cable = pdc202xx_old_cable_detect(hwif);
- else
- ultra_66 = 0;
-
- if (ultra_66 && cable) {
- printk(KERN_WARNING "Warning: %s channel requires an 80-pin cable for operation.\n", hwif->channel ? "Secondary":"Primary");
- printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name);
- }
+ u8 AP = 0;
if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
pdc_old_disable_66MHz_clock(drive->hwif);
@@ -308,7 +271,7 @@ chipset_is_set:
if (drive->media == ide_disk) /* PREFETCH_EN */
pci_write_config_byte(dev, (drive_pci), AP|PREFETCH_EN);
- speed = ide_dma_speed(drive, pdc202xx_ratemask(drive));
+ speed = ide_max_dma_mode(drive);
if (!(speed)) {
/* restore original pci-config space */
@@ -478,7 +441,7 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
- hwif->ultra_mask = 0x3f;
+ hwif->ultra_mask = hwif->cds->udma_mask;
hwif->mwdma_mask = 0x07;
hwif->swdma_mask = 0x07;
hwif->atapi_dma = 1;
@@ -500,10 +463,6 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
if (!noautodma)
hwif->autodma = 1;
hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
-#if PDC202_DEBUG_CABLE
- printk(KERN_DEBUG "%s: %s-pin cable\n",
- hwif->name, hwif->udma_four ? "80" : "40");
-#endif /* PDC202_DEBUG_CABLE */
}
static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
@@ -587,6 +546,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 16,
+ .udma_mask = 0x07, /* udma0-2 */
},{ /* 1 */
.name = "PDC20262",
.init_setup = init_setup_pdc202ata4,
@@ -597,6 +557,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 48,
+ .udma_mask = 0x1f, /* udma0-4 */
},{ /* 2 */
.name = "PDC20263",
.init_setup = init_setup_pdc202ata4,
@@ -607,6 +568,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 48,
+ .udma_mask = 0x1f, /* udma0-4 */
},{ /* 3 */
.name = "PDC20265",
.init_setup = init_setup_pdc20265,
@@ -617,6 +579,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 48,
+ .udma_mask = 0x3f, /* udma0-5 */
},{ /* 4 */
.name = "PDC20267",
.init_setup = init_setup_pdc202xx,
@@ -627,6 +590,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 48,
+ .udma_mask = 0x3f, /* udma0-5 */
}
};
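
Dropping the 80-pin-cable warnings here is not lost behaviour: every removed ratemask helper ended with the same eighty_ninty_three() clamp, which can now live in one place in the core. In sketch form (helper name illustrative):

	/* Restrict a UDMA mask to UDMA0-2 when no 80-wire cable is seen. */
	static u8 cable_clamp_udma_mask(ide_drive_t *drive, u8 mask)
	{
		if (!eighty_ninty_three(drive))
			mask &= 0x07;	/* UDMA33 is the 40-wire ceiling */
		return mask;
	}
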
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 061d300ab8b..8b219dd6302 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -106,68 +106,6 @@
static int no_piix_dma;
/**
- * piix_ratemask - compute rate mask for PIIX IDE
- * @drive: IDE drive to compute for
- *
- * Returns the available modes for the PIIX IDE controller.
- */
-
-static u8 piix_ratemask (ide_drive_t *drive)
-{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
- u8 mode;
-
- switch(dev->device) {
- case PCI_DEVICE_ID_INTEL_82801EB_1:
- mode = 3;
- break;
- /* UDMA 100 capable */
- case PCI_DEVICE_ID_INTEL_82801BA_8:
- case PCI_DEVICE_ID_INTEL_82801BA_9:
- case PCI_DEVICE_ID_INTEL_82801CA_10:
- case PCI_DEVICE_ID_INTEL_82801CA_11:
- case PCI_DEVICE_ID_INTEL_82801E_11:
- case PCI_DEVICE_ID_INTEL_82801DB_1:
- case PCI_DEVICE_ID_INTEL_82801DB_10:
- case PCI_DEVICE_ID_INTEL_82801DB_11:
- case PCI_DEVICE_ID_INTEL_82801EB_11:
- case PCI_DEVICE_ID_INTEL_ESB_2:
- case PCI_DEVICE_ID_INTEL_ICH6_19:
- case PCI_DEVICE_ID_INTEL_ICH7_21:
- case PCI_DEVICE_ID_INTEL_ESB2_18:
- case PCI_DEVICE_ID_INTEL_ICH8_6:
- mode = 3;
- break;
- /* UDMA 66 capable */
- case PCI_DEVICE_ID_INTEL_82801AA_1:
- case PCI_DEVICE_ID_INTEL_82372FB_1:
- mode = 2;
- break;
- /* UDMA 33 capable */
- case PCI_DEVICE_ID_INTEL_82371AB:
- case PCI_DEVICE_ID_INTEL_82443MX_1:
- case PCI_DEVICE_ID_INTEL_82451NX:
- case PCI_DEVICE_ID_INTEL_82801AB_1:
- return 1;
- /* Non UDMA capable (MWDMA2) */
- case PCI_DEVICE_ID_INTEL_82371SB_1:
- case PCI_DEVICE_ID_INTEL_82371FB_1:
- case PCI_DEVICE_ID_INTEL_82371FB_0:
- case PCI_DEVICE_ID_INTEL_82371MX:
- default:
- return 0;
- }
-
- /*
- * If we are UDMA66 capable fall back to UDMA33
- * if the drive cannot see an 80pin cable.
- */
- if (!eighty_ninty_three(drive))
- mode = min_t(u8, mode, 1);
- return mode;
-}
-
-/**
* piix_dma_2_pio - return the PIO mode matching DMA
* @xfer_rate: transfer speed
*
@@ -301,7 +239,7 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed)
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
u8 maslave = hwif->channel ? 0x42 : 0x40;
- u8 speed = ide_rate_filter(piix_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
int a_speed = 3 << (drive->dn * 4);
int u_flag = 1 << drive->dn;
int v_flag = 0x01 << drive->dn;
@@ -366,30 +304,6 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed)
}
/**
- * piix_config_drive_for_dma - configure drive for DMA
- * @drive: IDE drive to configure
- *
- * Set up a PIIX interface channel for the best available speed.
- * We prefer UDMA if it is available and then MWDMA. If DMA is
- * not available we switch to PIO and return 0.
- */
-
-static int piix_config_drive_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, piix_ratemask(drive));
-
- /*
- * If no DMA speed was available or the chipset has DMA bugs
- * then disable DMA and use PIO
- */
- if (!speed)
- return 0;
-
- (void) piix_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
-/**
* piix_config_drive_xfer_rate - set up an IDE device
* @drive: IDE drive to configure
*
@@ -401,7 +315,7 @@ static int piix_config_drive_xfer_rate (ide_drive_t *drive)
{
drive->init_speed = 0;
- if (ide_use_dma(drive) && piix_config_drive_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
@@ -524,26 +438,14 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
hwif->ide_dma_clear_irq = &piix_dma_clear_irq;
hwif->atapi_dma = 1;
- hwif->ultra_mask = 0x3f;
+
+ hwif->ultra_mask = hwif->cds->udma_mask;
hwif->mwdma_mask = 0x06;
hwif->swdma_mask = 0x04;
- switch(hwif->pci_dev->device) {
- case PCI_DEVICE_ID_INTEL_82371FB_0:
- case PCI_DEVICE_ID_INTEL_82371FB_1:
- case PCI_DEVICE_ID_INTEL_82371SB_1:
- hwif->ultra_mask = 0x80;
- break;
- case PCI_DEVICE_ID_INTEL_82371AB:
- case PCI_DEVICE_ID_INTEL_82443MX_1:
- case PCI_DEVICE_ID_INTEL_82451NX:
- case PCI_DEVICE_ID_INTEL_82801AB_1:
- hwif->ultra_mask = 0x07;
- break;
- default:
- if (!hwif->udma_four)
- hwif->udma_four = piix_cable_detect(hwif);
- break;
+ if (hwif->ultra_mask & 0x78) {
+ if (!hwif->udma_four)
+ hwif->udma_four = piix_cable_detect(hwif);
}
if (no_piix_dma)
@@ -557,7 +459,7 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
hwif->drives[0].autodma = hwif->autodma;
}
-#define DECLARE_PIIX_DEV(name_str) \
+#define DECLARE_PIIX_DEV(name_str, udma) \
{ \
.name = name_str, \
.init_chipset = init_chipset_piix, \
@@ -566,11 +468,12 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
.autodma = AUTODMA, \
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
.bootable = ON_BOARD, \
+ .udma_mask = udma, \
}
static ide_pci_device_t piix_pci_info[] __devinitdata = {
- /* 0 */ DECLARE_PIIX_DEV("PIIXa"),
- /* 1 */ DECLARE_PIIX_DEV("PIIXb"),
+ /* 0 */ DECLARE_PIIX_DEV("PIIXa", 0x00), /* no udma */
+ /* 1 */ DECLARE_PIIX_DEV("PIIXb", 0x00), /* no udma */
/* 2 */
{ /*
@@ -587,28 +490,28 @@ static ide_pci_device_t piix_pci_info[] __devinitdata = {
.flags = IDEPCI_FLAG_ISA_PORTS
},
- /* 3 */ DECLARE_PIIX_DEV("PIIX3"),
- /* 4 */ DECLARE_PIIX_DEV("PIIX4"),
- /* 5 */ DECLARE_PIIX_DEV("ICH0"),
- /* 6 */ DECLARE_PIIX_DEV("PIIX4"),
- /* 7 */ DECLARE_PIIX_DEV("ICH"),
- /* 8 */ DECLARE_PIIX_DEV("PIIX4"),
- /* 9 */ DECLARE_PIIX_DEV("PIIX4"),
- /* 10 */ DECLARE_PIIX_DEV("ICH2"),
- /* 11 */ DECLARE_PIIX_DEV("ICH2M"),
- /* 12 */ DECLARE_PIIX_DEV("ICH3M"),
- /* 13 */ DECLARE_PIIX_DEV("ICH3"),
- /* 14 */ DECLARE_PIIX_DEV("ICH4"),
- /* 15 */ DECLARE_PIIX_DEV("ICH5"),
- /* 16 */ DECLARE_PIIX_DEV("C-ICH"),
- /* 17 */ DECLARE_PIIX_DEV("ICH4"),
- /* 18 */ DECLARE_PIIX_DEV("ICH5-SATA"),
- /* 19 */ DECLARE_PIIX_DEV("ICH5"),
- /* 20 */ DECLARE_PIIX_DEV("ICH6"),
- /* 21 */ DECLARE_PIIX_DEV("ICH7"),
- /* 22 */ DECLARE_PIIX_DEV("ICH4"),
- /* 23 */ DECLARE_PIIX_DEV("ESB2"),
- /* 24 */ DECLARE_PIIX_DEV("ICH8M"),
+ /* 3 */ DECLARE_PIIX_DEV("PIIX3", 0x00), /* no udma */
+ /* 4 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */
+ /* 5 */ DECLARE_PIIX_DEV("ICH0", 0x07), /* udma0-2 */
+ /* 6 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */
+ /* 7 */ DECLARE_PIIX_DEV("ICH", 0x1f), /* udma0-4 */
+ /* 8 */ DECLARE_PIIX_DEV("PIIX4", 0x1f), /* udma0-4 */
+ /* 9 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */
+ /* 10 */ DECLARE_PIIX_DEV("ICH2", 0x3f), /* udma0-5 */
+ /* 11 */ DECLARE_PIIX_DEV("ICH2M", 0x3f), /* udma0-5 */
+ /* 12 */ DECLARE_PIIX_DEV("ICH3M", 0x3f), /* udma0-5 */
+ /* 13 */ DECLARE_PIIX_DEV("ICH3", 0x3f), /* udma0-5 */
+ /* 14 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */
+ /* 15 */ DECLARE_PIIX_DEV("ICH5", 0x3f), /* udma0-5 */
+ /* 16 */ DECLARE_PIIX_DEV("C-ICH", 0x3f), /* udma0-5 */
+ /* 17 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */
+ /* 18 */ DECLARE_PIIX_DEV("ICH5-SATA", 0x3f), /* udma0-5 */
+ /* 19 */ DECLARE_PIIX_DEV("ICH5", 0x3f), /* udma0-5 */
+ /* 20 */ DECLARE_PIIX_DEV("ICH6", 0x3f), /* udma0-5 */
+ /* 21 */ DECLARE_PIIX_DEV("ICH7", 0x3f), /* udma0-5 */
+ /* 22 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */
+ /* 23 */ DECLARE_PIIX_DEV("ESB2", 0x3f), /* udma0-5 */
+ /* 24 */ DECLARE_PIIX_DEV("ICH8M", 0x3f), /* udma0-5 */
};
/**
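
For reference, DECLARE_PIIX_DEV("ICH2", 0x3f) now expands to an initializer equivalent to the following (only the fields visible in this hunk are shown; the macro supplies the rest):

	{
		.name		= "ICH2",
		.init_chipset	= init_chipset_piix,
		.autodma	= AUTODMA,
		.enablebits	= {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
		.bootable	= ON_BOARD,
		.udma_mask	= 0x3f,		/* udma0-5 */
	}
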
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index f84bf791f72..cbf93632535 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -190,23 +190,6 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count)
}
/**
- * scc_ratemask - Compute available modes
- * @drive: IDE drive
- *
- * Compute the available speeds for the devices on the interface.
- * Enforce UDMA33 as a limit if there is no 80pin cable present.
- */
-
-static u8 scc_ratemask(ide_drive_t *drive)
-{
- u8 mode = 4;
-
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
-}
-
-/**
* scc_tuneproc - tune a drive PIO mode
* @drive: drive to tune
* @mode_wanted: the target operating mode
@@ -273,7 +256,7 @@ static void scc_tuneproc(ide_drive_t *drive, byte mode_wanted)
static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed)
{
ide_hwif_t *hwif = HWIF(drive);
- u8 speed = ide_rate_filter(scc_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
struct scc_ports *ports = ide_get_hwifdata(hwif);
unsigned long ctl_base = ports->ctl;
unsigned long cckctrl_port = ctl_base + 0xff0;
@@ -347,7 +330,7 @@ static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed)
static int scc_config_chipset_for_dma(ide_drive_t *drive)
{
- u8 speed = ide_dma_speed(drive, scc_ratemask(drive));
+ u8 speed = ide_max_dma_mode(drive);
if (!speed)
return 0;
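
ide_max_dma_mode() takes over from ide_dma_speed(drive, ratemask): the best rate now falls out of the controller masks (after any ->udma_filter) intersected with what the drive itself advertises. A simplified sketch, assuming bit n of drive->id->dma_ultra advertises UDMA mode n (illustrative, not the core implementation):

	/* Assumes <linux/ide.h> context. */
	static u8 best_udma_mode(ide_drive_t *drive)
	{
		u8 mask = drive->hwif->ultra_mask & (drive->id->dma_ultra & 0x7f);
		int n;

		for (n = 6; n >= 0; n--)
			if (mask & (1 << n))
				return XFER_UDMA_0 + n;

		return 0;	/* no UDMA mode both sides support */
	}
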
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index dbcd37a0c65..2fa6d92d16c 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -65,16 +65,16 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
return 0;
}
-static u8 svwks_ratemask (ide_drive_t *drive)
+static u8 svwks_udma_filter(ide_drive_t *drive)
{
struct pci_dev *dev = HWIF(drive)->pci_dev;
- u8 mode = 0;
+ u8 mask = 0;
if (!svwks_revision)
pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);
if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
- return 2;
+ return 0x1f;
if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
u32 reg = 0;
if (isa_dev)
@@ -86,25 +86,31 @@ static u8 svwks_ratemask (ide_drive_t *drive)
if(drive->media == ide_disk)
return 0;
/* Check the OSB4 DMA33 enable bit */
- return ((reg & 0x00004000) == 0x00004000) ? 1 : 0;
+ return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0;
} else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) {
- return 1;
+ return 0x07;
} else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) {
- u8 btr = 0;
+ u8 btr = 0, mode;
pci_read_config_byte(dev, 0x5A, &btr);
mode = btr & 0x3;
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
+
/* If someone decides to do UDMA133 on CSB5 the same
issue will bite so be inclusive */
if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
mode = 2;
+
+ switch(mode) {
+ case 2: mask = 0x1f; break;
+ case 1: mask = 0x07; break;
+ default: mask = 0x00; break;
+ }
}
if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
(dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
(!(PCI_FUNC(dev->devfn) & 1)))
- mode = 2;
- return mode;
+ mask = 0x1f;
+
+ return mask;
}
static u8 svwks_csb_check (struct pci_dev *dev)
@@ -141,7 +147,7 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
if (xferspeed == 255) /* PIO auto-tuning */
speed = XFER_PIO_0 + pio;
else
- speed = ide_rate_filter(svwks_ratemask(drive), xferspeed);
+ speed = ide_rate_filter(drive, xferspeed);
/* If we are about to put a disk into UDMA mode we screwed up.
Our code assumes we never _ever_ do this on an OSB4 */
@@ -304,7 +310,7 @@ static void svwks_tune_drive (ide_drive_t *drive, u8 pio)
static int config_chipset_for_dma (ide_drive_t *drive)
{
- u8 speed = ide_dma_speed(drive, svwks_ratemask(drive));
+ u8 speed = ide_max_dma_mode(drive);
if (!(speed))
speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL);
@@ -500,6 +506,7 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
hwif->tuneproc = &svwks_tune_drive;
hwif->speedproc = &svwks_tune_chipset;
+ hwif->udma_filter = &svwks_udma_filter;
hwif->atapi_dma = 1;
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index fd09b295a69..d3185e29a38 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -692,7 +692,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
return -EIO;
/* Create /proc/ide entries */
- create_proc_ide_interfaces();
+ ide_proc_register_port(hwif);
return 0;
}
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index c0188de3cc6..d09e74c2996 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -122,45 +122,41 @@ static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
}
/**
- * siimage_ratemask - Compute available modes
- * @drive: IDE drive
+ * sil_udma_filter - compute UDMA mask
+ * @drive: IDE device
+ *
+ * Compute the available UDMA speeds for the device on the interface.
*
- * Compute the available speeds for the devices on the interface.
* For the CMD680 this depends on the clocking mode (scsc), for the
- * SI3312 SATA controller life is a bit simpler. Enforce UDMA33
- * as a limit if there is no 80pin cable present.
+ * SI3112 SATA controller life is a bit simpler.
*/
-
-static byte siimage_ratemask (ide_drive_t *drive)
+
+static u8 sil_udma_filter(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 mode = 0, scsc = 0;
+ ide_hwif_t *hwif = drive->hwif;
unsigned long base = (unsigned long) hwif->hwif_data;
+ u8 mask = 0, scsc = 0;
if (hwif->mmio)
scsc = hwif->INB(base + 0x4A);
else
pci_read_config_byte(hwif->pci_dev, 0x8A, &scsc);
- if(is_sata(hwif))
- {
- if(strstr(drive->id->model, "Maxtor"))
- return 3;
- return 4;
+ if (is_sata(hwif)) {
+ mask = strstr(drive->id->model, "Maxtor") ? 0x3f : 0x7f;
+ goto out;
}
-
+
if ((scsc & 0x30) == 0x10) /* 133 */
- mode = 4;
+ mask = 0x7f;
else if ((scsc & 0x30) == 0x20) /* 2xPCI */
- mode = 4;
+ mask = 0x7f;
else if ((scsc & 0x30) == 0x00) /* 100 */
- mode = 3;
+ mask = 0x3f;
else /* Disabled ? */
BUG();
-
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
+out:
+ return mask;
}
/**
@@ -306,7 +302,7 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed)
ide_hwif_t *hwif = HWIF(drive);
u16 ultra = 0, multi = 0;
u8 mode = 0, unit = drive->select.b.unit;
- u8 speed = ide_rate_filter(siimage_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
unsigned long base = (unsigned long)hwif->hwif_data;
u8 scsc = 0, addr_mask = ((hwif->channel) ?
((hwif->mmio) ? 0xF4 : 0x84) :
@@ -389,7 +385,7 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed)
static int config_chipset_for_dma (ide_drive_t *drive)
{
- u8 speed = ide_dma_speed(drive, siimage_ratemask(drive));
+ u8 speed = ide_max_dma_mode(drive);
if (!speed)
return 0;
@@ -831,7 +827,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
/*
* Now set up the hw. We have to do this ourselves as
- * the MMIO layout isnt the same as the the standard port
+ * the MMIO layout isn't the same as the standard port
* based I/O
*/
@@ -989,6 +985,7 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
hwif->tuneproc = &siimage_tuneproc;
hwif->reset_poll = &siimage_reset_poll;
hwif->pre_reset = &siimage_pre_reset;
+ hwif->udma_filter = &sil_udma_filter;
if(is_sata(hwif)) {
static int first = 1;
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 2ba0669f36a..2bde1b92784 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -191,7 +191,7 @@ static char* chipset_capability[] = {
"ATA 133 (1st gen)", "ATA 133 (2nd gen)"
};
-#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS)
+#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
#include <linux/stat.h>
#include <linux/proc_fs.h>
@@ -426,17 +426,7 @@ static int sis_get_info (char *buffer, char **addr, off_t offset, int count)
return len > count ? count : len;
}
-#endif /* defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) */
-
-static u8 sis5513_ratemask (ide_drive_t *drive)
-{
- u8 rates[] = { 0, 0, 1, 2, 3, 3, 4, 4 };
- u8 mode = rates[chipset_family];
-
- if (!eighty_ninty_three(drive))
- mode = min(mode, (u8)1);
- return mode;
-}
+#endif /* defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
/*
* Configuration functions
@@ -563,7 +553,7 @@ static int sis5513_tune_chipset (ide_drive_t *drive, u8 xferspeed)
u8 drive_pci, reg, speed;
u32 regdw;
- speed = ide_rate_filter(sis5513_ratemask(drive), xferspeed);
+ speed = ide_rate_filter(drive, xferspeed);
/* See config_art_rwp_pio for drive pci config registers */
drive_pci = 0x40;
@@ -648,32 +638,13 @@ static void sis5513_tune_drive (ide_drive_t *drive, u8 pio)
(void) config_chipset_for_pio(drive, pio);
}
-/*
- * ((id->hw_config & 0x4000|0x2000) && (HWIF(drive)->udma_four))
- */
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, sis5513_ratemask(drive));
-
-#ifdef DEBUG
- printk("SIS5513: config_chipset_for_dma, drive %d, ultra %x\n",
- drive->dn, drive->id->dma_ultra);
-#endif
-
- if (!(speed))
- return 0;
-
- sis5513_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int sis5513_config_xfer_rate(ide_drive_t *drive)
{
config_art_rwp_pio(drive, 5);
drive->init_speed = 0;
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
@@ -826,7 +797,7 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
break;
}
-#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS)
+#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
if (!sis_proc) {
sis_proc = 1;
bmide_dev = dev;
@@ -858,6 +829,8 @@ static unsigned int __devinit ata66_sis5513 (ide_hwif_t *hwif)
static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
{
+ u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
+
hwif->autodma = 0;
if (!hwif->irq)
@@ -873,7 +846,8 @@ static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
}
hwif->atapi_dma = 1;
- hwif->ultra_mask = 0x7f;
+
+ hwif->ultra_mask = udma_rates[chipset_family];
hwif->mwdma_mask = 0x07;
hwif->swdma_mask = 0x07;
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 852ccb36da1..c40f291f91e 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -21,15 +21,6 @@
#include <asm/io.h>
-static u8 slc90e66_ratemask (ide_drive_t *drive)
-{
- u8 mode = 2;
-
- if (!eighty_ninty_three(drive))
- mode = min_t(u8, mode, 1);
- return mode;
-}
-
static u8 slc90e66_dma_2_pio (u8 xfer_rate) {
switch(xfer_rate) {
case XFER_UDMA_4:
@@ -122,7 +113,7 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
u8 maslave = hwif->channel ? 0x42 : 0x40;
- u8 speed = ide_rate_filter(slc90e66_ratemask(drive), xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
int sitre = 0, a_speed = 7 << (drive->dn * 4);
int u_speed = 0, u_flag = 1 << drive->dn;
u16 reg4042, reg44, reg48, reg4a;
@@ -169,22 +160,11 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
return ide_config_drive_speed(drive, speed);
}
-static int slc90e66_config_drive_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, slc90e66_ratemask(drive));
-
- if (!speed)
- return 0;
-
- (void) slc90e66_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive)
{
drive->init_speed = 0;
- if (ide_use_dma(drive) && slc90e66_config_drive_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 0b6d81d6ce4..cee619bb2ea 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -13,18 +13,13 @@
#include <linux/pci.h>
#include <linux/ide.h>
-static inline u8 tc86c001_ratemask(ide_drive_t *drive)
-{
- return eighty_ninty_three(drive) ? 2 : 1;
-}
-
static int tc86c001_tune_chipset(ide_drive_t *drive, u8 speed)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
u16 mode, scr = hwif->INW(scr_port);
- speed = ide_rate_filter(tc86c001_ratemask(drive), speed);
+ speed = ide_rate_filter(drive, speed);
switch (speed) {
case XFER_UDMA_4: mode = 0x00c0; break;
@@ -172,20 +167,9 @@ static int tc86c001_busproc(ide_drive_t *drive, int state)
return 0;
}
-static int config_chipset_for_dma(ide_drive_t *drive)
-{
- u8 speed = ide_dma_speed(drive, tc86c001_ratemask(drive));
-
- if (!speed)
- return 0;
-
- (void) tc86c001_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int tc86c001_config_drive_xfer_rate(ide_drive_t *drive)
{
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index 5e06179c346..35e8c612638 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -48,7 +48,7 @@ static int triflex_tune_chipset(ide_drive_t *drive, u8 xferspeed)
u16 timing = 0;
u32 triflex_timings = 0;
u8 unit = (drive->select.b.unit & 0x01);
- u8 speed = ide_rate_filter(0, xferspeed);
+ u8 speed = ide_rate_filter(drive, xferspeed);
pci_read_config_dword(dev, channel_offset, &triflex_timings);
@@ -100,20 +100,9 @@ static void triflex_tune_drive(ide_drive_t *drive, u8 pio)
(void) triflex_tune_chipset(drive, (XFER_PIO_0 + use_pio));
}
-static int triflex_config_drive_for_dma(ide_drive_t *drive)
-{
- int speed = ide_dma_speed(drive, 0); /* No ultra speeds */
-
- if (!speed)
- return 0;
-
- (void) triflex_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int triflex_config_drive_xfer_rate(ide_drive_t *drive)
{
- if (ide_use_dma(drive) && triflex_config_drive_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
triflex_tune_drive(drive, 255);
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index a49ebe44bab..45fc36f0f21 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1276,6 +1276,8 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
/* We probe the hwif now */
probe_hwif_init(hwif);
+ ide_proc_register_port(hwif);
+
return 0;
}
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 118fb3205ca..67035ba4bf5 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -702,6 +702,7 @@ out:
int ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d)
{
+ ide_hwif_t *hwif = NULL, *mate = NULL;
ata_index_t index_list;
int ret;
@@ -710,11 +711,19 @@ int ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d)
goto out;
if ((index_list.b.low & 0xf0) != 0xf0)
- probe_hwif_init_with_fixup(&ide_hwifs[index_list.b.low], d->fixup);
+ hwif = &ide_hwifs[index_list.b.low];
if ((index_list.b.high & 0xf0) != 0xf0)
- probe_hwif_init_with_fixup(&ide_hwifs[index_list.b.high], d->fixup);
+ mate = &ide_hwifs[index_list.b.high];
- create_proc_ide_interfaces();
+ if (hwif)
+ probe_hwif_init_with_fixup(hwif, d->fixup);
+ if (mate)
+ probe_hwif_init_with_fixup(mate, d->fixup);
+
+ if (hwif)
+ ide_proc_register_port(hwif);
+ if (mate)
+ ide_proc_register_port(mate);
out:
return ret;
}
@@ -748,13 +757,22 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
}
}
- create_proc_ide_interfaces();
+ for (i = 0; i < 2; i++) {
+ u8 idx[2] = { index_list[i].b.low, index_list[i].b.high };
+ int j;
+
+ for (j = 0; j < 2; j++) {
+ if ((idx[j] & 0xf0) != 0xf0)
+ ide_proc_register_port(ide_hwifs + idx[j]);
+ }
+ }
out:
return ret;
}
EXPORT_SYMBOL_GPL(ide_setup_pci_devices);
+#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
/*
* Module interfaces
*/
@@ -861,3 +879,4 @@ void __init ide_scan_pcibus (int scan_direction)
__pci_register_driver(d, d->driver.owner, d->driver.mod_name);
}
}
+#endif
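
The reworked ide_setup_pci_devices() path above defers proc registration until after both hwifs of both devices are probed. Its 2x2 walk over index_list treats a high nibble of 0xf as the "no interface" sentinel; a standalone toy of that walk, with made-up index values:

    #include <stdio.h>

    /* Toy model of the registration loop added above: each PCI IDE
     * device contributes a low/high hwif index, 0xfX meaning unused. */
    struct index_pair { unsigned char low, high; };

    int main(void)
    {
        struct index_pair index_list[2] = { { 0x00, 0x01 }, { 0xf0, 0x02 } };
        int i, j;

        for (i = 0; i < 2; i++) {
            unsigned char idx[2] = { index_list[i].low, index_list[i].high };

            for (j = 0; j < 2; j++) {
                if ((idx[j] & 0xf0) != 0xf0)
                    printf("register hwif %u\n", idx[j]);
                else
                    printf("slot %d/%d unused\n", i, j);
            }
        }
        return 0;
    }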
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 61d7809a5a2..8012b3b0ce7 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,4 +1,7 @@
menu "IEEE 1394 (FireWire) support"
+ depends on PCI || BROKEN
+
+source "drivers/firewire/Kconfig"
config IEEE1394
tristate "IEEE 1394 (FireWire) support"
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 6a1a0572275..835937e3852 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1702,7 +1702,7 @@ static int nodemgr_host_thread(void *__hi)
generation = get_hpsb_generation(host);
/* If we get a reset before we are done waiting, then
- * start the the waiting over again */
+ * start the waiting over again */
if (generation != g)
g = generation, i = 0;
}
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 66b36de9fa6..994decc7bcf 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -1,4 +1,5 @@
menu "InfiniBand support"
+ depends on HAS_IOMEM
config INFINIBAND
depends on PCI || BROKEN
@@ -29,6 +30,11 @@ config INFINIBAND_USER_ACCESS
libibverbs, libibcm and a hardware driver library from
<http://www.openib.org>.
+config INFINIBAND_USER_MEM
+ bool
+ depends on INFINIBAND_USER_ACCESS != n
+ default y
+
config INFINIBAND_ADDR_TRANS
bool
depends on INFINIBAND && INET
@@ -40,6 +46,8 @@ source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
+source "drivers/infiniband/hw/mlx4/Kconfig"
+
source "drivers/infiniband/ulp/ipoib/Kconfig"
source "drivers/infiniband/ulp/srp/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index da2066c4f22..75f325e40b5 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
+obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 189e5d4b9b1..cb1ab3ea499 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o
+ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
@@ -28,5 +29,4 @@ ib_umad-y := user_mad.o
ib_ucm-y := ucm.o
-ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_mem.o \
- uverbs_marshall.o
+ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7fabb425b03..592c90aa318 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -613,6 +613,8 @@ static void __exit ib_core_cleanup(void)
{
ib_cache_cleanup();
ib_sysfs_cleanup();
+ /* Make sure that any pending umem accounting work is done. */
+ flush_scheduled_work();
}
module_init(ib_core_init);
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/umem.c
index c95fe952abd..f32ca5fbb26 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/umem.c
@@ -39,13 +39,6 @@
#include "uverbs.h"
-struct ib_umem_account_work {
- struct work_struct work;
- struct mm_struct *mm;
- unsigned long diff;
-};
-
-
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
struct ib_umem_chunk *chunk, *tmp;
@@ -64,35 +57,56 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
}
}
-int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
- void *addr, size_t size, int write)
+/**
+ * ib_umem_get - Pin and DMA map userspace memory.
+ * @context: userspace context to pin memory for
+ * @addr: userspace virtual address to start at
+ * @size: length of region to pin
+ * @access: IB_ACCESS_xxx flags for memory being pinned
+ */
+struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ size_t size, int access)
{
+ struct ib_umem *umem;
struct page **page_list;
struct ib_umem_chunk *chunk;
unsigned long locked;
unsigned long lock_limit;
unsigned long cur_base;
unsigned long npages;
- int ret = 0;
+ int ret;
int off;
int i;
if (!can_do_mlock())
- return -EPERM;
+ return ERR_PTR(-EPERM);
- page_list = (struct page **) __get_free_page(GFP_KERNEL);
- if (!page_list)
- return -ENOMEM;
+ umem = kmalloc(sizeof *umem, GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
+
+ umem->context = context;
+ umem->length = size;
+ umem->offset = addr & ~PAGE_MASK;
+ umem->page_size = PAGE_SIZE;
+ /*
+ * We ask for writable memory if any access flags other than
+ * "remote read" are set. "Local write" and "remote write"
+ * obviously require write access. "Remote atomic" can do
+ * things like fetch and add, which will modify memory, and
+ * "MW bind" can change permissions by binding a window.
+ */
+ umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
- mem->user_base = (unsigned long) addr;
- mem->length = size;
- mem->offset = (unsigned long) addr & ~PAGE_MASK;
- mem->page_size = PAGE_SIZE;
- mem->writable = write;
+ INIT_LIST_HEAD(&umem->chunk_list);
- INIT_LIST_HEAD(&mem->chunk_list);
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list) {
+ kfree(umem);
+ return ERR_PTR(-ENOMEM);
+ }
- npages = PAGE_ALIGN(size + mem->offset) >> PAGE_SHIFT;
+ npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
down_write(&current->mm->mmap_sem);
@@ -104,13 +118,13 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
goto out;
}
- cur_base = (unsigned long) addr & PAGE_MASK;
+ cur_base = addr & PAGE_MASK;
while (npages) {
ret = get_user_pages(current, current->mm, cur_base,
min_t(int, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !write, page_list, NULL);
+ 1, !umem->writable, page_list, NULL);
if (ret < 0)
goto out;
@@ -136,7 +150,7 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
chunk->page_list[i].length = PAGE_SIZE;
}
- chunk->nmap = ib_dma_map_sg(dev,
+ chunk->nmap = ib_dma_map_sg(context->device,
&chunk->page_list[0],
chunk->nents,
DMA_BIDIRECTIONAL);
@@ -151,75 +165,94 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
ret -= chunk->nents;
off += chunk->nents;
- list_add_tail(&chunk->list, &mem->chunk_list);
+ list_add_tail(&chunk->list, &umem->chunk_list);
}
ret = 0;
}
out:
- if (ret < 0)
- __ib_umem_release(dev, mem, 0);
- else
+ if (ret < 0) {
+ __ib_umem_release(context->device, umem, 0);
+ kfree(umem);
+ } else
current->mm->locked_vm = locked;
up_write(&current->mm->mmap_sem);
free_page((unsigned long) page_list);
- return ret;
+ return ret < 0 ? ERR_PTR(ret) : umem;
}
+EXPORT_SYMBOL(ib_umem_get);
-void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
+static void ib_umem_account(struct work_struct *work)
{
- __ib_umem_release(dev, umem, 1);
-
- down_write(&current->mm->mmap_sem);
- current->mm->locked_vm -=
- PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
- up_write(&current->mm->mmap_sem);
-}
+ struct ib_umem *umem = container_of(work, struct ib_umem, work);
-static void ib_umem_account(struct work_struct *_work)
-{
- struct ib_umem_account_work *work =
- container_of(_work, struct ib_umem_account_work, work);
-
- down_write(&work->mm->mmap_sem);
- work->mm->locked_vm -= work->diff;
- up_write(&work->mm->mmap_sem);
- mmput(work->mm);
- kfree(work);
+ down_write(&umem->mm->mmap_sem);
+ umem->mm->locked_vm -= umem->diff;
+ up_write(&umem->mm->mmap_sem);
+ mmput(umem->mm);
+ kfree(umem);
}
-void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
+/**
+ * ib_umem_release - release memory pinned with ib_umem_get
+ * @umem: umem struct to release
+ */
+void ib_umem_release(struct ib_umem *umem)
{
- struct ib_umem_account_work *work;
+ struct ib_ucontext *context = umem->context;
struct mm_struct *mm;
+ unsigned long diff;
- __ib_umem_release(dev, umem, 1);
+ __ib_umem_release(umem->context->device, umem, 1);
mm = get_task_mm(current);
if (!mm)
return;
+ diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+
/*
* We may be called with the mm's mmap_sem already held. This
* can happen when a userspace munmap() is the call that drops
* the last reference to our file and calls our release
* method. If there are memory regions to destroy, we'll end
- * up here and not be able to take the mmap_sem. Therefore we
- * defer the vm_locked accounting to the system workqueue.
+ * up here and not be able to take the mmap_sem. In that case
+ * we defer the vm_locked accounting to the system workqueue.
*/
+ if (context->closing) {
+ if (!down_write_trylock(&mm->mmap_sem)) {
+ INIT_WORK(&umem->work, ib_umem_account);
+ umem->mm = mm;
+ umem->diff = diff;
- work = kmalloc(sizeof *work, GFP_KERNEL);
- if (!work) {
- mmput(mm);
+ schedule_work(&umem->work);
return;
- }
+ }
+ } else
+ down_write(&mm->mmap_sem);
+
+ current->mm->locked_vm -= diff;
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ kfree(umem);
+}
+EXPORT_SYMBOL(ib_umem_release);
+
+int ib_umem_page_count(struct ib_umem *umem)
+{
+ struct ib_umem_chunk *chunk;
+ int shift;
+ int i;
+ int n;
+
+ shift = ilog2(umem->page_size);
- INIT_WORK(&work->work, ib_umem_account);
- work->mm = mm;
- work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
+ n = 0;
+ list_for_each_entry(chunk, &umem->chunk_list, list)
+ for (i = 0; i < chunk->nmap; ++i)
+ n += sg_dma_len(&chunk->page_list[i]) >> shift;
- schedule_work(&work->work);
+ return n;
}
+EXPORT_SYMBOL(ib_umem_page_count);
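
ib_umem_get() now returns either a struct ib_umem * or an errno encoded into the pointer with ERR_PTR(), instead of filling a caller-provided struct and returning an int. A minimal user-space model of that encoding trick, with simplified local copies of the linux/err.h helpers:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* Simplified copies of the kernel's linux/err.h helpers: errnos are
     * small, so -errno cast to a pointer lands in the top 4095 bytes of
     * the address space, which no valid kernel pointer occupies. */
    static inline void *ERR_PTR(long error) { return (void *) error; }
    static inline long PTR_ERR(const void *ptr) { return (long) ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
    }

    static void *get_umem(int fail)
    {
        static int dummy_umem;

        return fail ? ERR_PTR(-ENOMEM) : (void *) &dummy_umem;
    }

    int main(void)
    {
        void *umem = get_umem(1);

        if (IS_ERR(umem))
            printf("ib_umem_get-style failure: errno %ld\n", -PTR_ERR(umem));
        else
            printf("got a valid umem pointer\n");
        return 0;
    }

This is why every converted driver below checks IS_ERR() on the result rather than a negative return value.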
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 102a59c033f..c33546f9e96 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -45,6 +45,7 @@
#include <linux/completion.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
/*
@@ -163,11 +164,6 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
-int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
- void *addr, size_t size, int write);
-void ib_umem_release(struct ib_device *dev, struct ib_umem *umem);
-void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
-
#define IB_UVERBS_DECLARE_CMD(name) \
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
const char __user *buf, int in_len, \
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bab66769be1..01d70084aeb 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
* Copyright (c) 2005 PathScale, Inc. All rights reserved.
* Copyright (c) 2006 Mellanox Technologies. All rights reserved.
*
@@ -295,6 +295,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&ucontext->qp_list);
INIT_LIST_HEAD(&ucontext->srq_list);
INIT_LIST_HEAD(&ucontext->ah_list);
+ ucontext->closing = 0;
resp.num_comp_vectors = file->device->num_comp_vectors;
@@ -573,7 +574,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
struct ib_uverbs_reg_mr cmd;
struct ib_uverbs_reg_mr_resp resp;
struct ib_udata udata;
- struct ib_umem_object *obj;
+ struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_mr *mr;
int ret;
@@ -599,35 +600,21 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
!(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
return -EINVAL;
- obj = kmalloc(sizeof *obj, GFP_KERNEL);
- if (!obj)
+ uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
+ if (!uobj)
return -ENOMEM;
- init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
- down_write(&obj->uobject.mutex);
-
- /*
- * We ask for writable memory if any access flags other than
- * "remote read" are set. "Local write" and "remote write"
- * obviously require write access. "Remote atomic" can do
- * things like fetch and add, which will modify memory, and
- * "MW bind" can change permissions by binding a window.
- */
- ret = ib_umem_get(file->device->ib_dev, &obj->umem,
- (void *) (unsigned long) cmd.start, cmd.length,
- !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
- if (ret)
- goto err_free;
-
- obj->umem.virt_base = cmd.hca_va;
+ init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
+ down_write(&uobj->mutex);
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
if (!pd) {
ret = -EINVAL;
- goto err_release;
+ goto err_free;
}
- mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
+ mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
+ cmd.access_flags, &udata);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
goto err_put;
@@ -635,19 +622,19 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
mr->device = pd->device;
mr->pd = pd;
- mr->uobject = &obj->uobject;
+ mr->uobject = uobj;
atomic_inc(&pd->usecnt);
atomic_set(&mr->usecnt, 0);
- obj->uobject.object = mr;
- ret = idr_add_uobj(&ib_uverbs_mr_idr, &obj->uobject);
+ uobj->object = mr;
+ ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
if (ret)
goto err_unreg;
memset(&resp, 0, sizeof resp);
resp.lkey = mr->lkey;
resp.rkey = mr->rkey;
- resp.mr_handle = obj->uobject.id;
+ resp.mr_handle = uobj->id;
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp)) {
@@ -658,17 +645,17 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
put_pd_read(pd);
mutex_lock(&file->mutex);
- list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
+ list_add_tail(&uobj->list, &file->ucontext->mr_list);
mutex_unlock(&file->mutex);
- obj->uobject.live = 1;
+ uobj->live = 1;
- up_write(&obj->uobject.mutex);
+ up_write(&uobj->mutex);
return in_len;
err_copy:
- idr_remove_uobj(&ib_uverbs_mr_idr, &obj->uobject);
+ idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
err_unreg:
ib_dereg_mr(mr);
@@ -676,11 +663,8 @@ err_unreg:
err_put:
put_pd_read(pd);
-err_release:
- ib_umem_release(file->device->ib_dev, &obj->umem);
-
err_free:
- put_uobj_write(&obj->uobject);
+ put_uobj_write(uobj);
return ret;
}
@@ -691,7 +675,6 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
struct ib_uverbs_dereg_mr cmd;
struct ib_mr *mr;
struct ib_uobject *uobj;
- struct ib_umem_object *memobj;
int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -701,8 +684,7 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
if (!uobj)
return -EINVAL;
- memobj = container_of(uobj, struct ib_umem_object, uobject);
- mr = uobj->object;
+ mr = uobj->object;
ret = ib_dereg_mr(mr);
if (!ret)
@@ -719,8 +701,6 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
list_del(&uobj->list);
mutex_unlock(&file->mutex);
- ib_umem_release(file->device->ib_dev, &memobj->umem);
-
put_uobj(uobj);
return in_len;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index d44e5479965..14d7ccd8919 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -183,6 +183,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
if (!context)
return 0;
+ context->closing = 1;
+
list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
struct ib_ah *ah = uobj->object;
@@ -230,16 +232,10 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
struct ib_mr *mr = uobj->object;
- struct ib_device *mrdev = mr->device;
- struct ib_umem_object *memobj;
idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
ib_dereg_mr(mr);
-
- memobj = container_of(uobj, struct ib_umem_object, uobject);
- ib_umem_release_on_close(mrdev, &memobj->umem);
-
- kfree(memobj);
+ kfree(uobj);
}
list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
@@ -906,7 +902,6 @@ static void __exit ib_uverbs_cleanup(void)
unregister_filesystem(&uverbs_event_fs);
class_destroy(uverbs_class);
unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
- flush_scheduled_work();
idr_destroy(&ib_uverbs_pd_idr);
idr_destroy(&ib_uverbs_mr_idr);
idr_destroy(&ib_uverbs_mw_idr);
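
The closing flag set in ib_uverbs_cleanup_ucontext() above is what lets ib_umem_release() distinguish "called from a munmap() that may already hold mmap_sem" from the normal path: only in the closing case does it try-lock, deferring the vm_locked accounting to a work item when the lock is busy. A toy of that control flow using a pthread mutex; the "deferral" here is just a message, not a real work queue (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;
    static long locked_vm = 16;

    /* Mirrors ib_umem_release(): defer only when the context is closing
     * *and* the lock cannot be taken; otherwise block normally. */
    static void release(int closing, long diff)
    {
        if (closing && pthread_mutex_trylock(&mmap_sem) != 0) {
            printf("lock busy: deferring accounting of %ld pages\n", diff);
            return; /* the real code schedules ib_umem_account() here */
        } else if (!closing)
            pthread_mutex_lock(&mmap_sem);

        locked_vm -= diff;
        pthread_mutex_unlock(&mmap_sem);
        printf("accounted %ld pages, locked_vm now %ld\n", diff, locked_vm);
    }

    int main(void)
    {
        release(0, 4);                  /* normal path: blocking lock */

        pthread_mutex_lock(&mmap_sem);  /* simulate caller holding mmap_sem */
        release(1, 4);                  /* closing path: trylock fails, defer */
        pthread_mutex_unlock(&mmap_sem);
        return 0;
    }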
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 109166223c0..997cf153076 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -56,6 +56,7 @@
#include <asm/byteorder.h>
#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "c2.h"
#include "c2_provider.h"
@@ -396,6 +397,7 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
}
mr->pd = to_c2pd(ib_pd);
+ mr->umem = NULL;
pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
"*iova_start %llx, first pa %llx, last pa %llx\n",
__FUNCTION__, page_shift, pbl_depth, total_len,
@@ -428,8 +430,8 @@ static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
}
-static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
- int acc, struct ib_udata *udata)
+static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *udata)
{
u64 *pages;
u64 kva = 0;
@@ -441,15 +443,23 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
struct c2_mr *c2mr;
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
- shift = ffs(region->page_size) - 1;
c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
if (!c2mr)
return ERR_PTR(-ENOMEM);
c2mr->pd = c2pd;
+ c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+ if (IS_ERR(c2mr->umem)) {
+ err = PTR_ERR(c2mr->umem);
+ kfree(c2mr);
+ return ERR_PTR(err);
+ }
+
+ shift = ffs(c2mr->umem->page_size) - 1;
+
n = 0;
- list_for_each_entry(chunk, &region->chunk_list, list)
+ list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)
n += chunk->nents;
pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
@@ -459,35 +469,34 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
}
i = 0;
- list_for_each_entry(chunk, &region->chunk_list, list) {
+ list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {
for (j = 0; j < chunk->nmap; ++j) {
len = sg_dma_len(&chunk->page_list[j]) >> shift;
for (k = 0; k < len; ++k) {
pages[i++] =
sg_dma_address(&chunk->page_list[j]) +
- (region->page_size * k);
+ (c2mr->umem->page_size * k);
}
}
}
- kva = (u64)region->virt_base;
+ kva = virt;
err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
pages,
- region->page_size,
+ c2mr->umem->page_size,
i,
- region->length,
- region->offset,
+ length,
+ c2mr->umem->offset,
&kva,
c2_convert_access(acc),
c2mr);
kfree(pages);
- if (err) {
- kfree(c2mr);
- return ERR_PTR(err);
- }
+ if (err)
+ goto err;
return &c2mr->ibmr;
err:
+ ib_umem_release(c2mr->umem);
kfree(c2mr);
return ERR_PTR(err);
}
@@ -502,8 +511,11 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
if (err)
pr_debug("c2_stag_dealloc failed: %d\n", err);
- else
+ else {
+ if (mr->umem)
+ ib_umem_release(mr->umem);
kfree(mr);
+ }
return err;
}
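
With the umem now owned by the driver, c2_reg_user_mr() above, like the cxgb3, ehca and ipath conversions below, flattens the umem's scatter/gather chunk list into a plain array of per-page DMA addresses for the hardware. A self-contained model of that double loop, with a toy structure standing in for the mapped entries of struct ib_umem_chunk:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Toy stand-in for one mapped scatter/gather entry: a DMA address
     * plus a length that may cover several pages. */
    struct toy_sg { uint64_t dma_addr; unsigned int dma_len; };

    int main(void)
    {
        struct toy_sg chunk[] = {
            { 0x100000, 2 * PAGE_SIZE },    /* two contiguous pages */
            { 0x380000, 1 * PAGE_SIZE },    /* one more page elsewhere */
        };
        uint64_t pages[8];
        unsigned int i = 0, j, k, len;
        int shift = 12;                     /* ffs(PAGE_SIZE) - 1 */

        /* Same shape as the driver loop: one page-sized address per slot. */
        for (j = 0; j < sizeof chunk / sizeof chunk[0]; ++j) {
            len = chunk[j].dma_len >> shift;
            for (k = 0; k < len; ++k)
                pages[i++] = chunk[j].dma_addr + (uint64_t) PAGE_SIZE * k;
        }

        for (j = 0; j < i; ++j)
            printf("page[%u] = 0x%llx\n", j, (unsigned long long) pages[j]);
        return 0;
    }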
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index fc906223220..1076df2ee96 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -73,6 +73,7 @@ struct c2_pd {
struct c2_mr {
struct ib_mr ibmr;
struct c2_pd *pd;
+ struct ib_umem *umem;
};
struct c2_av;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index a891493fd34..e7c2c394803 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -47,6 +47,7 @@
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "cxio_hal.h"
@@ -443,6 +444,8 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
remove_handle(rhp, &rhp->mmidr, mmid);
if (mhp->kva)
kfree((void *) (unsigned long) mhp->kva);
+ if (mhp->umem)
+ ib_umem_release(mhp->umem);
PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
kfree(mhp);
return 0;
@@ -577,8 +580,8 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
}
-static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
- int acc, struct ib_udata *udata)
+static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *udata)
{
__be64 *pages;
int shift, n, len;
@@ -591,7 +594,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
struct iwch_reg_user_mr_resp uresp;
PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
- shift = ffs(region->page_size) - 1;
php = to_iwch_pd(pd);
rhp = php->rhp;
@@ -599,8 +601,17 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+ if (IS_ERR(mhp->umem)) {
+ err = PTR_ERR(mhp->umem);
+ kfree(mhp);
+ return ERR_PTR(err);
+ }
+
+ shift = ffs(mhp->umem->page_size) - 1;
+
n = 0;
- list_for_each_entry(chunk, &region->chunk_list, list)
+ list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
n += chunk->nents;
pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
@@ -611,13 +622,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
i = n = 0;
- list_for_each_entry(chunk, &region->chunk_list, list)
+ list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
for (j = 0; j < chunk->nmap; ++j) {
len = sg_dma_len(&chunk->page_list[j]) >> shift;
for (k = 0; k < len; ++k) {
pages[i++] = cpu_to_be64(sg_dma_address(
&chunk->page_list[j]) +
- region->page_size * k);
+ mhp->umem->page_size * k);
}
}
@@ -625,9 +636,9 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
mhp->attr.pdid = php->pdid;
mhp->attr.zbva = 0;
mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = region->virt_base;
+ mhp->attr.va_fbo = virt;
mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) region->length;
+ mhp->attr.len = (u32) length;
mhp->attr.pbl_size = i;
err = iwch_register_mem(rhp, php, mhp, shift, pages);
kfree(pages);
@@ -650,6 +661,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
return &mhp->ibmr;
err:
+ ib_umem_release(mhp->umem);
kfree(mhp);
return ERR_PTR(err);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 93bcc56756b..48833f3f3bd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -73,6 +73,7 @@ struct tpt_attributes {
struct iwch_mr {
struct ib_mr ibmr;
+ struct ib_umem *umem;
struct iwch_dev *rhp;
u64 kva;
struct tpt_attributes attr;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 10fb8fbafa0..f64d42b0867 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -176,6 +176,7 @@ struct ehca_mr {
struct ib_mr ib_mr; /* must always be first in ehca_mr */
struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
} ib;
+ struct ib_umem *umem;
spinlock_t mrlock;
enum ehca_mr_flag flags;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index f284be1c916..82dda2faf4d 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -745,6 +745,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
if(!create_comp_task(pool, cpu)) {
ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
@@ -752,24 +753,29 @@ static int comp_pool_callback(struct notifier_block *nfb,
}
break;
case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
kthread_bind(cct->task, any_online_cpu(cpu_online_map));
destroy_comp_task(pool, cpu);
break;
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
kthread_bind(cct->task, cpu);
wake_up_process(cct->task);
break;
case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
break;
case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
break;
case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
destroy_comp_task(pool, cpu);
take_over_work(pool, cpu);
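
The *_FROZEN labels added above are the same hotplug events redelivered during a suspend/resume cycle: each is the base event with a flag bit OR-ed in. An alternative to doubling every case label is masking the flag off first, as the toy below shows; the constants are redefined locally to mirror the kernel headers of this era, so treat the exact values as illustrative:

    #include <stdio.h>

    /* Local stand-ins for the hotplug notifier constants; values mirror
     * the contemporary kernel headers but are redefined here so the
     * demo is self-contained. */
    #define CPU_ONLINE          0x0002
    #define CPU_TASKS_FROZEN    0x0010
    #define CPU_ONLINE_FROZEN   (CPU_ONLINE | CPU_TASKS_FROZEN)

    int main(void)
    {
        unsigned long action = CPU_ONLINE_FROZEN;

        /* One mask strips the _FROZEN variants back to the base event. */
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
            printf("CPU came online (frozen=%lu)\n",
                   action & CPU_TASKS_FROZEN ? 1UL : 0UL);
            break;
        }
        return 0;
    }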
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index e14b029332c..37e7fe0908c 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -78,8 +78,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
int num_phys_buf,
int mr_access_flags, u64 *iova_start);
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
- struct ib_umem *region,
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
int mr_access_flags, struct ib_udata *udata);
int ehca_rereg_phys_mr(struct ib_mr *mr,
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index d22ab563633..84c5bb49856 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -39,6 +39,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rdma/ib_umem.h>
+
#include <asm/current.h>
#include "ehca_iverbs.h"
@@ -238,10 +240,8 @@ reg_phys_mr_exit0:
/*----------------------------------------------------------------------*/
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
- struct ib_umem *region,
- int mr_access_flags,
- struct ib_udata *udata)
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
+ int mr_access_flags, struct ib_udata *udata)
{
struct ib_mr *ib_mr;
struct ehca_mr *e_mr;
@@ -257,11 +257,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
ehca_gen_err("bad pd=%p", pd);
return ERR_PTR(-EFAULT);
}
- if (!region) {
- ehca_err(pd->device, "bad input values: region=%p", region);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_user_mr_exit0;
- }
+
if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
!(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
@@ -275,17 +271,10 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
ib_mr = ERR_PTR(-EINVAL);
goto reg_user_mr_exit0;
}
- if (region->page_size != PAGE_SIZE) {
- ehca_err(pd->device, "page size not supported, "
- "region->page_size=%x", region->page_size);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_user_mr_exit0;
- }
- if ((region->length == 0) ||
- ((region->virt_base + region->length) < region->virt_base)) {
+ if (length == 0 || virt + length < virt) {
ehca_err(pd->device, "bad input values: length=%lx "
- "virt_base=%lx", region->length, region->virt_base);
+ "virt_base=%lx", length, virt);
ib_mr = ERR_PTR(-EINVAL);
goto reg_user_mr_exit0;
}
@@ -297,40 +286,55 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
goto reg_user_mr_exit0;
}
+ e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
+ mr_access_flags);
+ if (IS_ERR(e_mr->umem)) {
+ ib_mr = (void *) e_mr->umem;
+ goto reg_user_mr_exit1;
+ }
+
+ if (e_mr->umem->page_size != PAGE_SIZE) {
+ ehca_err(pd->device, "page size not supported, "
+ "e_mr->umem->page_size=%x", e_mr->umem->page_size);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_user_mr_exit2;
+ }
+
/* determine number of MR pages */
- num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length +
- PAGE_SIZE - 1) / PAGE_SIZE);
- num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length +
- EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+ num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) /
+ PAGE_SIZE);
+ num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) /
+ EHCA_PAGESIZE);
/* register MR on HCA */
pginfo.type = EHCA_MR_PGI_USER;
pginfo.num_pages = num_pages_mr;
pginfo.num_4k = num_pages_4k;
- pginfo.region = region;
- pginfo.next_4k = region->offset / EHCA_PAGESIZE;
+ pginfo.region = e_mr->umem;
+ pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE;
pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
- (&region->chunk_list),
+ (&e_mr->umem->chunk_list),
list);
- ret = ehca_reg_mr(shca, e_mr, (u64*)region->virt_base,
- region->length, mr_access_flags, e_pd, &pginfo,
- &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
+ ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
+ &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
if (ret) {
ib_mr = ERR_PTR(ret);
- goto reg_user_mr_exit1;
+ goto reg_user_mr_exit2;
}
/* successful registration of all pages */
return &e_mr->ib.ib_mr;
+reg_user_mr_exit2:
+ ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
ehca_mr_delete(e_mr);
reg_user_mr_exit0:
if (IS_ERR(ib_mr))
- ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x"
+ ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
" udata=%p",
- PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
+ PTR_ERR(ib_mr), pd, mr_access_flags, udata);
return ib_mr;
} /* end ehca_reg_user_mr() */
@@ -596,6 +600,9 @@ int ehca_dereg_mr(struct ib_mr *mr)
goto dereg_mr_exit0;
}
+ if (e_mr->umem)
+ ib_umem_release(e_mr->umem);
+
/* successful deregistration */
ehca_mr_delete(e_mr);
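
The page-count arithmetic in the rewritten ehca_reg_user_mr() rounds the leading misalignment plus the region length up to whole pages, once for PAGE_SIZE and once for the 4 KB EHCA_PAGESIZE. A small numeric check of the formula:

    #include <stdio.h>

    /* Pages of size `page` spanned by `length` bytes at address `virt`:
     * round (misalignment + length) up to a whole number of pages. */
    static unsigned long num_pages(unsigned long virt, unsigned long length,
                                   unsigned long page)
    {
        return ((virt % page) + length + page - 1) / page;
    }

    int main(void)
    {
        /* 0x3000 bytes starting 0x234 into a page span four 4K pages:
         * bytes 0x234..0x3233 touch pages 0, 1, 2 and 3. */
        printf("4K pages: %lu\n", num_pages(0x1234, 0x3000, 4096));

        /* A page-aligned, page-sized region needs exactly one page. */
        printf("aligned:  %lu\n", num_pages(0x2000, 0x1000, 4096));
        return 0;
    }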
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 31e70732e36..bdeef8d4f27 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <rdma/ib_umem.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>
@@ -147,6 +148,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
mr->mr.offset = 0;
mr->mr.access_flags = acc;
mr->mr.max_segs = num_phys_buf;
+ mr->umem = NULL;
m = 0;
n = 0;
@@ -170,46 +172,56 @@ bail:
/**
* ipath_reg_user_mr - register a userspace memory region
* @pd: protection domain for this memory region
- * @region: the user memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: virtual address to use (from HCA's point of view)
* @mr_access_flags: access flags for this memory region
* @udata: unused by the InfiniPath driver
*
* Returns the memory region on success, otherwise returns an errno.
*/
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
- int mr_access_flags, struct ib_udata *udata)
+struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
{
struct ipath_mr *mr;
+ struct ib_umem *umem;
struct ib_umem_chunk *chunk;
int n, m, i;
struct ib_mr *ret;
- if (region->length == 0) {
+ if (length == 0) {
ret = ERR_PTR(-EINVAL);
goto bail;
}
+ umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags);
+ if (IS_ERR(umem))
+ return (void *) umem;
+
n = 0;
- list_for_each_entry(chunk, &region->chunk_list, list)
+ list_for_each_entry(chunk, &umem->chunk_list, list)
n += chunk->nents;
mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
if (!mr) {
ret = ERR_PTR(-ENOMEM);
+ ib_umem_release(umem);
goto bail;
}
mr->mr.pd = pd;
- mr->mr.user_base = region->user_base;
- mr->mr.iova = region->virt_base;
- mr->mr.length = region->length;
- mr->mr.offset = region->offset;
+ mr->mr.user_base = start;
+ mr->mr.iova = virt_addr;
+ mr->mr.length = length;
+ mr->mr.offset = umem->offset;
mr->mr.access_flags = mr_access_flags;
mr->mr.max_segs = n;
+ mr->umem = umem;
m = 0;
n = 0;
- list_for_each_entry(chunk, &region->chunk_list, list) {
+ list_for_each_entry(chunk, &umem->chunk_list, list) {
for (i = 0; i < chunk->nents; i++) {
void *vaddr;
@@ -219,7 +231,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
goto bail;
}
mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = region->page_size;
+ mr->mr.map[m]->segs[n].length = umem->page_size;
n++;
if (n == IPATH_SEGSZ) {
m++;
@@ -253,6 +265,10 @@ int ipath_dereg_mr(struct ib_mr *ibmr)
i--;
kfree(mr->mr.map[i]);
}
+
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
kfree(mr);
return 0;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 7064fc22272..088b837ebea 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -251,6 +251,7 @@ struct ipath_sge {
/* Memory region */
struct ipath_mr {
struct ib_mr ibmr;
+ struct ib_umem *umem;
struct ipath_mregion mr; /* must be last */
};
@@ -751,8 +752,8 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
struct ib_phys_buf *buffer_list,
int num_phys_buf, int acc, u64 *iova_start);
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
- int mr_access_flags,
+struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
int ipath_dereg_mr(struct ib_mr *ibmr);
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
new file mode 100644
index 00000000000..b8912cdb966
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -0,0 +1,9 @@
+config MLX4_INFINIBAND
+ tristate "Mellanox ConnectX HCA support"
+ depends on INFINIBAND
+ select MLX4_CORE
+ ---help---
+ This driver provides low-level InfiniBand support for
+ Mellanox ConnectX PCI Express host channel adapters (HCAs).
+ This is required to use InfiniBand protocols such as
+ IP-over-IB or SRP with these devices.
diff --git a/drivers/infiniband/hw/mlx4/Makefile b/drivers/infiniband/hw/mlx4/Makefile
new file mode 100644
index 00000000000..70f09c7826d
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MLX4_INFINIBAND) += mlx4_ib.o
+
+mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
new file mode 100644
index 00000000000..c75ac9463e2
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx4_ib.h"
+
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+ struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+ struct mlx4_ib_ah *ah;
+
+ ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ memset(&ah->av, 0, sizeof ah->av);
+
+ ah->av.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+ ah->av.g_slid = ah_attr->src_path_bits;
+ ah->av.dlid = cpu_to_be16(ah_attr->dlid);
+ if (ah_attr->static_rate) {
+ ah->av.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+ while (ah->av.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+ !(1 << ah->av.stat_rate & dev->caps.stat_rate_support))
+ --ah->av.stat_rate;
+ }
+ ah->av.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ ah->av.g_slid |= 0x80;
+ ah->av.gid_index = ah_attr->grh.sgid_index;
+ ah->av.hop_limit = ah_attr->grh.hop_limit;
+ ah->av.sl_tclass_flowlabel |=
+ cpu_to_be32((ah_attr->grh.traffic_class << 20) |
+ ah_attr->grh.flow_label);
+ memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
+ }
+
+ return &ah->ibah;
+}
+
+int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct mlx4_ib_ah *ah = to_mah(ibah);
+
+ memset(ah_attr, 0, sizeof *ah_attr);
+ ah_attr->dlid = be16_to_cpu(ah->av.dlid);
+ ah_attr->sl = be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
+ ah_attr->port_num = be32_to_cpu(ah->av.port_pd) >> 24;
+ if (ah->av.stat_rate)
+ ah_attr->static_rate = ah->av.stat_rate - MLX4_STAT_RATE_OFFSET;
+ ah_attr->src_path_bits = ah->av.g_slid & 0x7F;
+
+ if (mlx4_ib_ah_grh_present(ah)) {
+ ah_attr->ah_flags = IB_AH_GRH;
+
+ ah_attr->grh.traffic_class =
+ be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20;
+ ah_attr->grh.flow_label =
+ be32_to_cpu(ah->av.sl_tclass_flowlabel) & 0xfffff;
+ ah_attr->grh.hop_limit = ah->av.hop_limit;
+ ah_attr->grh.sgid_index = ah->av.gid_index;
+ memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, 16);
+ }
+
+ return 0;
+}
+
+int mlx4_ib_destroy_ah(struct ib_ah *ah)
+{
+ kfree(to_mah(ah));
+ return 0;
+}
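
mlx4_ib_create_ah() and mlx4_ib_query_ah() above pack several fields into single words: port_pd carries the PD number with the port in the top byte, and sl_tclass_flowlabel carries the SL (top 4 bits), traffic class (bits 27:20) and flow label (low 20 bits). A host-endian sketch of the round trip; the real code additionally byte-swaps with cpu_to_be32()/be32_to_cpu():

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t pdn = 0x00abcd, port = 2;
        uint32_t sl = 3, tclass = 0x55, flow = 0x12345;

        /* Pack as in mlx4_ib_create_ah() (byte-swapping omitted). */
        uint32_t port_pd = pdn | (port << 24);
        uint32_t sl_tc_fl = (sl << 28) | (tclass << 20) | flow;

        /* Unpack as in mlx4_ib_query_ah(); the driver lets the u8
         * traffic_class field do the masking that & 0xff does here. */
        printf("port   = %u\n",   (unsigned) (port_pd >> 24));
        printf("pdn    = 0x%x\n", (unsigned) (port_pd & 0xffffff));
        printf("sl     = %u\n",   (unsigned) (sl_tc_fl >> 28));
        printf("tclass = 0x%x\n", (unsigned) ((sl_tc_fl >> 20) & 0xff));
        printf("flow   = 0x%x\n", (unsigned) (sl_tc_fl & 0xfffff));
        return 0;
    }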
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
new file mode 100644
index 00000000000..b2a290c6703
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4_ib.h"
+#include "user.h"
+
+static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
+{
+ struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
+}
+
+static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
+{
+ struct ib_event event;
+ struct ib_cq *ibcq;
+
+ if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
+ printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+ "on CQ %06x\n", type, cq->cqn);
+ return;
+ }
+
+ ibcq = &to_mibcq(cq)->ibcq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.event = IB_EVENT_CQ_ERR;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+}
+
+static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
+{
+ int offset = n * sizeof (struct mlx4_cqe);
+
+ if (buf->buf.nbufs == 1)
+ return buf->buf.u.direct.buf + offset;
+ else
+ return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf +
+ (offset & (PAGE_SIZE - 1));
+}
+
+static void *get_cqe(struct mlx4_ib_cq *cq, int n)
+{
+ return get_cqe_from_buf(&cq->buf, n);
+}
+
+static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
+{
+ struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
+
+ return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+ !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
+}
+
+static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
+{
+ return get_sw_cqe(cq, cq->mcq.cons_index);
+}
+
+struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_cq *cq;
+ struct mlx4_uar *uar;
+ int buf_size;
+ int err;
+
+ if (entries < 1 || entries > dev->dev->caps.max_cqes)
+ return ERR_PTR(-EINVAL);
+
+ cq = kmalloc(sizeof *cq, GFP_KERNEL);
+ if (!cq)
+ return ERR_PTR(-ENOMEM);
+
+ entries = roundup_pow_of_two(entries + 1);
+ cq->ibcq.cqe = entries - 1;
+ buf_size = entries * sizeof (struct mlx4_cqe);
+ spin_lock_init(&cq->lock);
+
+ if (context) {
+ struct mlx4_ib_create_cq ucmd;
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+ err = -EFAULT;
+ goto err_cq;
+ }
+
+ cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->umem)) {
+ err = PTR_ERR(cq->umem);
+ goto err_cq;
+ }
+
+ err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
+ ilog2(cq->umem->page_size), &cq->buf.mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
+ if (err)
+ goto err_mtt;
+
+ err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
+ &cq->db);
+ if (err)
+ goto err_mtt;
+
+ uar = &to_mucontext(context)->uar;
+ } else {
+ err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+ if (err)
+ goto err_cq;
+
+ cq->mcq.set_ci_db = cq->db.db;
+ cq->mcq.arm_db = cq->db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ *cq->mcq.arm_db = 0;
+
+ if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) {
+ err = -ENOMEM;
+ goto err_db;
+ }
+
+ err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
+ &cq->buf.mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
+ if (err)
+ goto err_mtt;
+
+ uar = &dev->priv_uar;
+ }
+
+ err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
+ cq->db.dma, &cq->mcq);
+ if (err)
+ goto err_dbmap;
+
+ cq->mcq.comp = mlx4_ib_cq_comp;
+ cq->mcq.event = mlx4_ib_cq_event;
+
+ if (context)
+ if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
+ err = -EFAULT;
+ goto err_dbmap;
+ }
+
+ return &cq->ibcq;
+
+err_dbmap:
+ if (context)
+ mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
+
+err_mtt:
+ mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
+
+err_buf:
+ if (context)
+ ib_umem_release(cq->umem);
+ else
+ mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
+ &cq->buf.buf);
+
+err_db:
+ if (!context)
+ mlx4_ib_db_free(dev, &cq->db);
+
+err_cq:
+ kfree(cq);
+
+ return ERR_PTR(err);
+}
+
+int mlx4_ib_destroy_cq(struct ib_cq *cq)
+{
+ struct mlx4_ib_dev *dev = to_mdev(cq->device);
+ struct mlx4_ib_cq *mcq = to_mcq(cq);
+
+ mlx4_cq_free(dev->dev, &mcq->mcq);
+ mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);
+
+ if (cq->uobject) {
+ mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
+ ib_umem_release(mcq->umem);
+ } else {
+ mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
+ &mcq->buf.buf);
+ mlx4_ib_db_free(dev, &mcq->db);
+ }
+
+ kfree(mcq);
+
+ return 0;
+}
+
+static void dump_cqe(void *cqe)
+{
+ __be32 *buf = cqe;
+
+ printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
+ be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
+ be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
+}
+
+static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
+ struct ib_wc *wc)
+{
+ if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
+ printk(KERN_DEBUG "local QP operation err "
+ "(QPN %06x, WQE index %x, vendor syndrome %02x, "
+ "opcode = %02x)\n",
+ be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
+ cqe->vendor_err_syndrome,
+ cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
+ dump_cqe(cqe);
+ }
+
+ switch (cqe->syndrome) {
+ case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
+ wc->status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_MW_BIND_ERR:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
+ wc->status = IB_WC_BAD_RESP_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
+ wc->status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
+ wc->status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
+ wc->status = IB_WC_REM_OP_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
+ wc->status = IB_WC_RETRY_EXC_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
+ wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
+ wc->status = IB_WC_REM_ABORT_ERR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ wc->vendor_err = cqe->vendor_err_syndrome;
+}
+
+static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
+ struct mlx4_ib_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct mlx4_cqe *cqe;
+ struct mlx4_qp *mqp;
+ struct mlx4_ib_wq *wq;
+ struct mlx4_ib_srq *srq;
+ int is_send;
+ int is_error;
+ u16 wqe_ctr;
+
+ cqe = next_cqe_sw(cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ ++cq->mcq.cons_index;
+
+ /*
+ * Make sure we read CQ entry contents after we've checked the
+ * ownership bit.
+ */
+ rmb();
+
+ is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
+ is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR;
+
+ if (!*cur_qp ||
+ (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
+ /*
+ * We do not have to take the QP table lock here,
+ * because CQs will be locked while QPs are removed
+ * from the table.
+ */
+ mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
+ be32_to_cpu(cqe->my_qpn));
+ if (unlikely(!mqp)) {
+ printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
+ cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
+ return -EINVAL;
+ }
+
+ *cur_qp = to_mibqp(mqp);
+ }
+
+ wc->qp = &(*cur_qp)->ibqp;
+
+ if (is_send) {
+ wq = &(*cur_qp)->sq;
+ wqe_ctr = be16_to_cpu(cqe->wqe_index);
+ wq->tail += wqe_ctr - (u16) wq->tail;
+ wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)];
+ ++wq->tail;
+ } else if ((*cur_qp)->ibqp.srq) {
+ srq = to_msrq((*cur_qp)->ibqp.srq);
+ wqe_ctr = be16_to_cpu(cqe->wqe_index);
+ wc->wr_id = srq->wrid[wqe_ctr];
+ mlx4_ib_free_srq_wqe(srq, wqe_ctr);
+ } else {
+ wq = &(*cur_qp)->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)];
+ ++wq->tail;
+ }
+
+ if (unlikely(is_error)) {
+ mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
+ return 0;
+ }
+
+ wc->status = IB_WC_SUCCESS;
+
+ if (is_send) {
+ wc->wc_flags = 0;
+ switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+ case MLX4_OPCODE_RDMA_WRITE_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ case MLX4_OPCODE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case MLX4_OPCODE_SEND_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ case MLX4_OPCODE_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case MLX4_OPCODE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = be32_to_cpu(cqe->byte_cnt);
+ break;
+ case MLX4_OPCODE_ATOMIC_CS:
+ wc->opcode = IB_WC_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case MLX4_OPCODE_ATOMIC_FA:
+ wc->opcode = IB_WC_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case MLX4_OPCODE_BIND_MW:
+ wc->opcode = IB_WC_BIND_MW;
+ break;
+ }
+ } else {
+ wc->byte_len = be32_to_cpu(cqe->byte_cnt);
+
+ switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+ case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->imm_data = cqe->immed_rss_invalid;
+ break;
+ case MLX4_RECV_OPCODE_SEND:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+ break;
+ case MLX4_RECV_OPCODE_SEND_IMM:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->imm_data = cqe->immed_rss_invalid;
+ break;
+ }
+
+ wc->slid = be16_to_cpu(cqe->rlid);
+ wc->sl = cqe->sl >> 4;
+ wc->src_qp = be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff;
+ wc->dlid_path_bits = (be32_to_cpu(cqe->g_mlpath_rqpn) >> 24) & 0x7f;
+ wc->wc_flags |= be32_to_cpu(cqe->g_mlpath_rqpn) & 0x80000000 ?
+ IB_WC_GRH : 0;
+ wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) >> 16;
+ }
+
+ return 0;
+}
+
+int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct mlx4_ib_cq *cq = to_mcq(ibcq);
+ struct mlx4_ib_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+ int err = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
+ if (err)
+ break;
+ }
+
+ if (npolled)
+ mlx4_cq_set_ci(&cq->mcq);
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ if (err == 0 || err == -EAGAIN)
+ return npolled;
+ else
+ return err;
+}
+
+int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ mlx4_cq_arm(&to_mcq(ibcq)->mcq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
+ to_mdev(ibcq->device)->uar_map,
+ MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));
+
+ return 0;
+}
+
+void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
+{
+ u32 prod_index;
+ int nfreed = 0;
+ struct mlx4_cqe *cqe;
+
+ /*
+ * First we need to find the current producer index, so we
+ * know where to start cleaning from. It doesn't matter if HW
+ * adds new entries after this loop -- the QP we're worried
+ * about is already in RESET, so the new entries won't come
+ * from our QP and therefore don't need to be checked.
+ */
+ for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
+ if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
+ break;
+
+ /*
+ * Now sweep backwards through the CQ, removing CQ entries
+ * that match our QP by copying older entries on top of them.
+ */
+ while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
+ cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+ if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
+ if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
+ mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
+ ++nfreed;
+ } else if (nfreed)
+ memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
+ cqe, sizeof *cqe);
+ }
+
+ if (nfreed) {
+ cq->mcq.cons_index += nfreed;
+ /*
+ * Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
+ wmb();
+ mlx4_cq_set_ci(&cq->mcq);
+ }
+}
+
+void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
+{
+ spin_lock_irq(&cq->lock);
+ __mlx4_ib_cq_clean(cq, qpn, srq);
+ spin_unlock_irq(&cq->lock);
+}
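
get_sw_cqe() above needs no shared head/tail register: the HCA toggles each CQE's ownership bit on every pass through the ring, and software compares that bit against the lap parity (n & (cqe + 1)) of its free-running consumer index. A standalone simulation of the handshake; the ring size and field layout are toy values:

    #include <stdio.h>
    #include <assert.h>

    #define ENTRIES 4                   /* must be a power of two */

    /* One toy CQE: just the ownership bit the poll logic cares about. */
    struct toy_cqe { int owner; int data; };

    static struct toy_cqe ring[ENTRIES];
    static unsigned int cons, prod;     /* free-running indices */

    /* Software may consume slot n iff its owner bit matches the lap
     * parity of n -- the same test as mlx4's get_sw_cqe(). */
    static struct toy_cqe *get_sw_cqe(unsigned int n)
    {
        struct toy_cqe *cqe = &ring[n & (ENTRIES - 1)];

        return (!!cqe->owner ^ !!(n & ENTRIES)) ? NULL : cqe;
    }

    static void hw_post(int data)       /* what the HCA would do */
    {
        struct toy_cqe *cqe = &ring[prod & (ENTRIES - 1)];

        cqe->data  = data;
        cqe->owner = !!(prod & ENTRIES);    /* toggles every lap */
        ++prod;
    }

    int main(void)
    {
        unsigned int i;
        struct toy_cqe *cqe;

        for (i = 0; i < ENTRIES; ++i)
            ring[i].owner = 1;          /* lap-0 slots start hardware-owned */

        assert(!get_sw_cqe(cons));      /* empty CQ: nothing to poll */

        for (i = 0; i < 3; ++i)
            hw_post(100 + i);
        while ((cqe = get_sw_cqe(cons)) != NULL) {
            printf("polled %d at index %u\n", cqe->data, cons);
            ++cons;
        }

        for (i = 0; i < 3; ++i)         /* crosses the wrap: parity flips */
            hw_post(200 + i);
        while ((cqe = get_sw_cqe(cons)) != NULL) {
            printf("polled %d at index %u\n", cqe->data, cons);
            ++cons;
        }
        return 0;
    }

Stale entries from the previous lap carry the old parity, so the poll loop stops by itself once it catches up with the hardware.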
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
new file mode 100644
index 00000000000..1c36087aef1
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "mlx4_ib.h"
+
+struct mlx4_ib_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
+ DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
+ unsigned long *bits[2];
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
+static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
+{
+ struct mlx4_ib_db_pgdir *pgdir;
+
+ pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+ if (!pgdir)
+ return NULL;
+
+ bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
+ pgdir->bits[0] = pgdir->order0;
+ pgdir->bits[1] = pgdir->order1;
+ pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
+ PAGE_SIZE, &pgdir->db_dma,
+ GFP_KERNEL);
+ if (!pgdir->db_page) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ return pgdir;
+}
+
+static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
+ struct mlx4_ib_db *db, int order)
+{
+ int o;
+ int i;
+
+ for (o = order; o <= 1; ++o) {
+ i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
+ if (i < MLX4_IB_DB_PER_PAGE >> o)
+ goto found;
+ }
+
+ return -ENOMEM;
+
+found:
+ clear_bit(i, pgdir->bits[o]);
+
+ i <<= o;
+
+ if (o > order)
+ set_bit(i ^ 1, pgdir->bits[order]);
+
+ db->u.pgdir = pgdir;
+ db->index = i;
+ db->db = pgdir->db_page + db->index;
+ db->dma = pgdir->db_dma + db->index * 4;
+ db->order = order;
+
+ return 0;
+}
+
+int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
+{
+ struct mlx4_ib_db_pgdir *pgdir;
+ int ret = 0;
+
+ mutex_lock(&dev->pgdir_mutex);
+
+ list_for_each_entry(pgdir, &dev->pgdir_list, list)
+ if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
+ goto out;
+
+ pgdir = mlx4_ib_alloc_db_pgdir(dev);
+ if (!pgdir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&pgdir->list, &dev->pgdir_list);
+
+ /* This should never fail -- we just allocated an empty page: */
+ WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+ mutex_unlock(&dev->pgdir_mutex);
+
+ return ret;
+}
+
+void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
+{
+ int o;
+ int i;
+
+ mutex_lock(&dev->pgdir_mutex);
+
+ o = db->order;
+ i = db->index;
+
+ if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+ clear_bit(i ^ 1, db->u.pgdir->order0);
+ ++o;
+ }
+
+ i >>= o;
+ set_bit(i, db->u.pgdir->bits[o]);
+
+ if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
+ dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
+ db->u.pgdir->db_page, db->u.pgdir->db_dma);
+ list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir);
+ }
+
+ mutex_unlock(&dev->pgdir_mutex);
+}
+
+struct mlx4_ib_user_db_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ unsigned long user_virt;
+ int refcnt;
+};
+
+int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
+ struct mlx4_ib_db *db)
+{
+ struct mlx4_ib_user_db_page *page;
+ struct ib_umem_chunk *chunk;
+ int err = 0;
+
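+ /*
+ * Doorbells that fall in the same user page share one pinned umem:
+ * the page is looked up by its page-aligned user address and
+ * refcounted rather than pinned once per doorbell.
+ */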
+ mutex_lock(&context->db_page_mutex);
+
+ list_for_each_entry(page, &context->db_page_list, list)
+ if (page->user_virt == (virt & PAGE_MASK))
+ goto found;
+
+ page = kmalloc(sizeof *page, GFP_KERNEL);
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ page->user_virt = (virt & PAGE_MASK);
+ page->refcnt = 0;
+ page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
+ if (IS_ERR(page->umem)) {
+ err = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+
+ list_add(&page->list, &context->db_page_list);
+
+found:
+ chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
+ db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
+ db->u.user_page = page;
+ ++page->refcnt;
+
+out:
+ mutex_unlock(&context->db_page_mutex);
+
+ return err;
+}
+
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+{
+ mutex_lock(&context->db_page_mutex);
+
+ if (!--db->u.user_page->refcnt) {
+ list_del(&db->u.user_page->list);
+ ib_umem_release(db->u.user_page->umem);
+ kfree(db->u.user_page);
+ }
+
+ mutex_unlock(&context->db_page_mutex);
+}
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
new file mode 100644
index 00000000000..333091787c5
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_ib.h"
+
+enum {
+ MLX4_IB_VENDOR_CLASS1 = 0x9,
+ MLX4_IB_VENDOR_CLASS2 = 0xa
+};
+
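+/*
+ * MAD_IFC hands a MAD to the firmware for execution against the
+ * device's management agents.  The second half of the inbox optionally
+ * carries receive-completion context (source QP, SL, P_Key, GRH) so
+ * the firmware can generate key violation traps.
+ */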
+int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
+ int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ void *in_mad, void *response_mad)
+{
+ struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
+ void *inbox;
+ int err;
+ u32 in_modifier = port;
+ u8 op_modifier = 0;
+
+ inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
+ if (IS_ERR(inmailbox))
+ return PTR_ERR(inmailbox);
+ inbox = inmailbox->buf;
+
+ outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
+ if (IS_ERR(outmailbox)) {
+ mlx4_free_cmd_mailbox(dev->dev, inmailbox);
+ return PTR_ERR(outmailbox);
+ }
+
+ memcpy(inbox, in_mad, 256);
+
+ /*
+ * Key check traps can't be generated unless we have in_wc to
+ * tell us where to send the trap.
+ */
+ if (ignore_mkey || !in_wc)
+ op_modifier |= 0x1;
+ if (ignore_bkey || !in_wc)
+ op_modifier |= 0x2;
+
+ if (in_wc) {
+ struct {
+ __be32 my_qpn;
+ u32 reserved1;
+ __be32 rqpn;
+ u8 sl;
+ u8 g_path;
+ u16 reserved2[2];
+ __be16 pkey;
+ u32 reserved3[11];
+ u8 grh[40];
+ } *ext_info;
+
+ memset(inbox + 256, 0, 256);
+ ext_info = inbox + 256;
+
+ ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
+ ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
+ ext_info->sl = in_wc->sl << 4;
+ ext_info->g_path = in_wc->dlid_path_bits |
+ (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
+ ext_info->pkey = cpu_to_be16(in_wc->pkey_index);
+
+ if (in_grh)
+ memcpy(ext_info->grh, in_grh, 40);
+
+ op_modifier |= 0x4;
+
+ in_modifier |= in_wc->slid << 16;
+ }
+
+ err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
+ in_modifier, op_modifier,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+
+ if (!err)
+ memcpy(response_mad, outmailbox->buf, 256);
+
+ mlx4_free_cmd_mailbox(dev->dev, inmailbox);
+ mlx4_free_cmd_mailbox(dev->dev, outmailbox);
+
+ return err;
+}
+
+static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
+{
+ struct ib_ah *new_ah;
+ struct ib_ah_attr ah_attr;
+
+ if (!dev->send_agent[port_num - 1][0])
+ return;
+
+ memset(&ah_attr, 0, sizeof ah_attr);
+ ah_attr.dlid = lid;
+ ah_attr.sl = sl;
+ ah_attr.port_num = port_num;
+
+ new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
+ &ah_attr);
+ if (IS_ERR(new_ah))
+ return;
+
+ spin_lock(&dev->sm_lock);
+ if (dev->sm_ah[port_num - 1])
+ ib_destroy_ah(dev->sm_ah[port_num - 1]);
+ dev->sm_ah[port_num - 1] = new_ah;
+ spin_unlock(&dev->sm_lock);
+}
+
+/*
+ * Snoop SM MADs for port info and P_Key table sets, so we can
+ * synthesize LID change and P_Key change events.
+ */
+static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
+{
+ struct ib_event event;
+
+ if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+ mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
+ if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
+ struct ib_port_info *pinfo =
+ (struct ib_port_info *) ((struct ib_smp *) mad)->data;
+
+ update_sm_ah(to_mdev(ibdev), port_num,
+ be16_to_cpu(pinfo->sm_lid),
+ pinfo->neighbormtu_mastersmsl & 0xf);
+
+ event.device = ibdev;
+ event.element.port_num = port_num;
+
+ if (pinfo->clientrereg_resv_subnetto & 0x80)
+ event.event = IB_EVENT_CLIENT_REREGISTER;
+ else
+ event.event = IB_EVENT_LID_CHANGE;
+
+ ib_dispatch_event(&event);
+ }
+
+ if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
+ event.device = ibdev;
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.element.port_num = port_num;
+ ib_dispatch_event(&event);
+ }
+ }
+}
+
+static void node_desc_override(struct ib_device *dev,
+ struct ib_mad *mad)
+{
+ if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+ mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
+ mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
+ spin_lock(&to_mdev(dev)->sm_lock);
+ memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+ spin_unlock(&to_mdev(dev)->sm_lock);
+ }
+}
+
+static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
+{
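+ /* LID-routed SM-class traps go out via the QP0 agent; all other classes via QP1. */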
+ int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
+ int ret;
+
+ if (agent) {
+ send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
+ IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ /*
+ * We rely here on the fact that MLX QPs don't use the
+ * address handle after the send is posted (strictly
+ * speaking this violates the IB spec, but we know it's
+ * OK for our devices).
+ */
+ spin_lock(&dev->sm_lock);
+ memcpy(send_buf->mad, mad, sizeof *mad);
+ if ((send_buf->ah = dev->sm_ah[port_num - 1]))
+ ret = ib_post_send_mad(send_buf, NULL);
+ else
+ ret = -EINVAL;
+ spin_unlock(&dev->sm_lock);
+
+ if (ret)
+ ib_free_send_mad(send_buf);
+ }
+}
+
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ u16 slid;
+ int err;
+
+ slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+
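+ /* A trap with SLID 0 was generated locally; forward it to the SM. */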
+ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
+ forward_trap(to_mdev(ibdev), port_num, in_mad);
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ }
+
+ if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
+ in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+ return IB_MAD_RESULT_SUCCESS;
+
+ /*
+ * Don't process SMInfo queries or vendor-specific
+ * MADs -- the SMA can't handle them.
+ */
+ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+ ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+ IB_SMP_ATTR_VENDOR_MASK))
+ return IB_MAD_RESULT_SUCCESS;
+ } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+ in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
+ in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2) {
+ if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+ return IB_MAD_RESULT_SUCCESS;
+ } else
+ return IB_MAD_RESULT_SUCCESS;
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev),
+ mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY,
+ port_num, in_wc, in_grh, in_mad, out_mad);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ if (!out_mad->mad_hdr.status) {
+ smp_snoop(ibdev, port_num, in_mad);
+ node_desc_override(ibdev, out_mad);
+ }
+
+ /* set return bit in status of directed route responses */
+ if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+
+ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+ /* no response for trap repress */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static void send_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ ib_free_send_mad(mad_send_wc->send_buf);
+}
+
+int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
+{
+ struct ib_mad_agent *agent;
+ int p, q;
+ int ret;
+
+ for (p = 0; p < dev->dev->caps.num_ports; ++p)
+ for (q = 0; q <= 1; ++q) {
+ agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
+ q ? IB_QPT_GSI : IB_QPT_SMI,
+ NULL, 0, send_handler,
+ NULL, NULL);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+ dev->send_agent[p][q] = agent;
+ }
+
+ return 0;
+
+err:
+ for (p = 0; p < dev->dev->caps.num_ports; ++p)
+ for (q = 0; q <= 1; ++q)
+ if (dev->send_agent[p][q])
+ ib_unregister_mad_agent(dev->send_agent[p][q]);
+
+ return ret;
+}
+
+void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
+{
+ struct ib_mad_agent *agent;
+ int p, q;
+
+ for (p = 0; p < dev->dev->caps.num_ports; ++p) {
+ for (q = 0; q <= 1; ++q) {
+ agent = dev->send_agent[p][q];
+ dev->send_agent[p][q] = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+
+ if (dev->sm_ah[p])
+ ib_destroy_ah(dev->sm_ah[p]);
+ }
+}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
new file mode 100644
index 00000000000..688ecb4c39f
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -0,0 +1,651 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_ib.h"
+#include "user.h"
+
+#define DRV_NAME "mlx4_ib"
+#define DRV_VERSION "0.01"
+#define DRV_RELDATE "May 1, 2006"
+
+MODULE_AUTHOR("Roland Dreier");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const char mlx4_ib_version[] __devinitdata =
+ DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static void init_query_mad(struct ib_smp *mad)
+{
+ mad->base_version = 1;
+ mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ mad->class_version = 1;
+ mad->method = IB_MGMT_METHOD_GET;
+}
+
+static int mlx4_ib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memset(props, 0, sizeof *props);
+
+ props->fw_ver = dev->dev->caps.fw_ver;
+ props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
+ IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_SYS_IMAGE_GUID |
+ IB_DEVICE_RC_RNR_NAK_GEN;
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
+ props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
+ props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+ props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
+ props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
+
+ props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
+ 0xffffff;
+ props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
+ props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
+ memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
+
+ props->max_mr_size = ~0ull;
+ props->page_size_cap = dev->dev->caps.page_size_cap;
+ props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
+ props->max_qp_wr = dev->dev->caps.max_wqes;
+ props->max_sge = min(dev->dev->caps.max_sq_sg,
+ dev->dev->caps.max_rq_sg);
+ props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
+ props->max_cqe = dev->dev->caps.max_cqes;
+ props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
+ props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
+ props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
+ props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
+ props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
+ props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
+ props->max_srq_wr = dev->dev->caps.max_srq_wqes;
+ props->max_srq_sge = dev->dev->caps.max_srq_sge;
+ props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
+ props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ props->max_pkeys = dev->dev->caps.pkey_table_len;
+ props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
+ props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_mcast_grp;
+ props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+
+ return err;
+}
+
+static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ memset(props, 0, sizeof *props);
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
+ props->lmc = out_mad->data[34] & 0x7;
+ props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
+ props->sm_sl = out_mad->data[36] & 0xf;
+ props->state = out_mad->data[32] & 0xf;
+ props->phys_state = out_mad->data[33] >> 4;
+ props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
+ props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len;
+ props->max_msg_sz = 0x80000000;
+ props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len;
+ props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
+ props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
+ props->active_width = out_mad->data[31] & 0xf;
+ props->active_speed = out_mad->data[35] >> 4;
+ props->max_mtu = out_mad->data[41] & 0xf;
+ props->active_mtu = out_mad->data[36] >> 4;
+ props->subnet_timeout = out_mad->data[51] & 0x1f;
+ props->max_vl_num = out_mad->data[37] >> 4;
+ props->init_type_reply = out_mad->data[41] >> 4;
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+
+ return err;
+}
+
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw, out_mad->data + 8, 8);
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
+ in_mad->attr_mod = cpu_to_be32(index / 8);
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
+ in_mad->attr_mod = cpu_to_be32(index / 32);
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *props)
+{
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ spin_lock(&to_mdev(ibdev)->sm_lock);
+ memcpy(ibdev->node_desc, props->node_desc, 64);
+ spin_unlock(&to_mdev(ibdev)->sm_lock);
+ }
+
+ return 0;
+}
+
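+/*
+ * In the SET_PORT mailbox layout used below, bit 6 of byte 0 requests a
+ * reset of the QKey violation counter and dword 2 carries the new port
+ * capability mask.
+ */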
+static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
+ u32 cap_mask)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, 256);
+ *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
+ ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
+
+ err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev->dev, mailbox);
+ return err;
+}
+
+static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct ib_port_attr attr;
+ u32 cap_mask;
+ int err;
+
+ mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
+
+ err = mlx4_ib_query_port(ibdev, port, &attr);
+ if (err)
+ goto out;
+
+ cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
+ ~props->clr_port_cap_mask;
+
+ err = mlx4_SET_PORT(to_mdev(ibdev), port,
+ !!(mask & IB_PORT_RESET_QKEY_CNTR),
+ cap_mask);
+
+out:
+ mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
+ return err;
+}
+
+static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_ucontext *context;
+ struct mlx4_ib_alloc_ucontext_resp resp;
+ int err;
+
+ resp.qp_tab_size = dev->dev->caps.num_qps;
+ resp.bf_reg_size = dev->dev->caps.bf_reg_size;
+ resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
+
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
+ if (err) {
+ kfree(context);
+ return ERR_PTR(err);
+ }
+
+ INIT_LIST_HEAD(&context->db_page_list);
+ mutex_init(&context->db_page_mutex);
+
+ err = ib_copy_to_udata(udata, &resp, sizeof resp);
+ if (err) {
+ mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
+ kfree(context);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return &context->ibucontext;
+}
+
+static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
+
+ mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
+ kfree(context);
+
+ return 0;
+}
+
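+/*
+ * Page offset 0 of the mmap space maps the context's UAR page for
+ * doorbells; offset 1 maps its BlueFlame send page when the device
+ * supports BlueFlame.
+ */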
+static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct mlx4_ib_dev *dev = to_mdev(context->device);
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (vma->vm_pgoff == 0) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ to_mucontext(context)->uar.pfn,
+ PAGE_SIZE, vma->vm_page_prot))
+ return -EAGAIN;
+ } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
+ /* FIXME want pgprot_writecombine() for BlueFlame pages */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ to_mucontext(context)->uar.pfn +
+ dev->dev->caps.num_uars,
+ PAGE_SIZE, vma->vm_page_prot))
+ return -EAGAIN;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_pd *pd;
+ int err;
+
+ pd = kmalloc(sizeof *pd, GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
+ if (err) {
+ kfree(pd);
+ return ERR_PTR(err);
+ }
+
+ if (context)
+ if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
+ mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
+ kfree(pd);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return &pd->ibpd;
+}
+
+static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
+{
+ mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
+ kfree(pd);
+
+ return 0;
+}
+
+static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
+ &to_mqp(ibqp)->mqp, gid->raw);
+}
+
+static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
+ &to_mqp(ibqp)->mqp, gid->raw);
+}
+
+static int init_node_data(struct mlx4_ib_dev *dev)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+
+ err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+static void *mlx4_ib_add(struct mlx4_dev *dev)
+{
+ struct mlx4_ib_dev *ibdev;
+
+ ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
+ if (!ibdev) {
+ dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
+ return NULL;
+ }
+
+ if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
+ goto err_dealloc;
+
+ if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
+ goto err_pd;
+
+ ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!ibdev->uar_map)
+ goto err_uar;
+
+ INIT_LIST_HEAD(&ibdev->pgdir_list);
+ mutex_init(&ibdev->pgdir_mutex);
+
+ ibdev->dev = dev;
+
+ strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
+ ibdev->ib_dev.owner = THIS_MODULE;
+ ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports;
+ ibdev->ib_dev.num_comp_vectors = 1;
+ ibdev->ib_dev.dma_device = &dev->pdev->dev;
+
+ ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
+ ibdev->ib_dev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
+
+ ibdev->ib_dev.query_device = mlx4_ib_query_device;
+ ibdev->ib_dev.query_port = mlx4_ib_query_port;
+ ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
+ ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
+ ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
+ ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
+ ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
+ ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
+ ibdev->ib_dev.mmap = mlx4_ib_mmap;
+ ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
+ ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
+ ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
+ ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
+ ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
+ ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
+ ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
+ ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
+ ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
+ ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
+ ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
+ ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
+ ibdev->ib_dev.post_send = mlx4_ib_post_send;
+ ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
+ ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
+ ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
+ ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
+ ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
+ ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
+ ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
+ ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
+ ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
+ ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
+ ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
+
+ if (init_node_data(ibdev))
+ goto err_map;
+
+ spin_lock_init(&ibdev->sm_lock);
+ mutex_init(&ibdev->cap_mask_mutex);
+
+ if (ib_register_device(&ibdev->ib_dev))
+ goto err_map;
+
+ if (mlx4_ib_mad_init(ibdev))
+ goto err_reg;
+
+ return ibdev;
+
+err_reg:
+ ib_unregister_device(&ibdev->ib_dev);
+
+err_map:
+ iounmap(ibdev->uar_map);
+
+err_uar:
+ mlx4_uar_free(dev, &ibdev->priv_uar);
+
+err_pd:
+ mlx4_pd_free(dev, ibdev->priv_pdn);
+
+err_dealloc:
+ ib_dealloc_device(&ibdev->ib_dev);
+
+ return NULL;
+}
+
+static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
+{
+ struct mlx4_ib_dev *ibdev = ibdev_ptr;
+ int p;
+
+ for (p = 1; p <= dev->caps.num_ports; ++p)
+ mlx4_CLOSE_PORT(dev, p);
+
+ mlx4_ib_mad_cleanup(ibdev);
+ ib_unregister_device(&ibdev->ib_dev);
+ iounmap(ibdev->uar_map);
+ mlx4_uar_free(dev, &ibdev->priv_uar);
+ mlx4_pd_free(dev, ibdev->priv_pdn);
+ ib_dealloc_device(&ibdev->ib_dev);
+}
+
+static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
+ enum mlx4_dev_event event, int subtype,
+ int port)
+{
+ struct ib_event ibev;
+
+ switch (event) {
+ case MLX4_EVENT_TYPE_PORT_CHANGE:
+ ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ break;
+
+ case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR:
+ ibev.event = IB_EVENT_DEVICE_FATAL;
+ break;
+
+ default:
+ return;
+ }
+
+ ibev.device = ibdev_ptr;
+ ibev.element.port_num = port;
+
+ ib_dispatch_event(&ibev);
+}
+
+static struct mlx4_interface mlx4_ib_interface = {
+ .add = mlx4_ib_add,
+ .remove = mlx4_ib_remove,
+ .event = mlx4_ib_event
+};
+
+static int __init mlx4_ib_init(void)
+{
+ return mlx4_register_interface(&mlx4_ib_interface);
+}
+
+static void __exit mlx4_ib_cleanup(void)
+{
+ mlx4_unregister_interface(&mlx4_ib_interface);
+}
+
+module_init(mlx4_ib_init);
+module_exit(mlx4_ib_cleanup);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
new file mode 100644
index 00000000000..93dac71f323
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_IB_H
+#define MLX4_IB_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/doorbell.h>
+
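+/* Doorbell records are one __be32 each, so a page holds PAGE_SIZE / 4 of them. */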
+enum {
+ MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
+};
+
+struct mlx4_ib_db_pgdir;
+struct mlx4_ib_user_db_page;
+
+struct mlx4_ib_db {
+ __be32 *db;
+ union {
+ struct mlx4_ib_db_pgdir *pgdir;
+ struct mlx4_ib_user_db_page *user_page;
+ } u;
+ dma_addr_t dma;
+ int index;
+ int order;
+};
+
+struct mlx4_ib_ucontext {
+ struct ib_ucontext ibucontext;
+ struct mlx4_uar uar;
+ struct list_head db_page_list;
+ struct mutex db_page_mutex;
+};
+
+struct mlx4_ib_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+};
+
+struct mlx4_ib_cq_buf {
+ struct mlx4_buf buf;
+ struct mlx4_mtt mtt;
+};
+
+struct mlx4_ib_cq {
+ struct ib_cq ibcq;
+ struct mlx4_cq mcq;
+ struct mlx4_ib_cq_buf buf;
+ struct mlx4_ib_db db;
+ spinlock_t lock;
+ struct ib_umem *umem;
+};
+
+struct mlx4_ib_mr {
+ struct ib_mr ibmr;
+ struct mlx4_mr mmr;
+ struct ib_umem *umem;
+};
+
+struct mlx4_ib_wq {
+ u64 *wrid;
+ spinlock_t lock;
+ int max;
+ int max_gs;
+ int offset;
+ int wqe_shift;
+ unsigned head;
+ unsigned tail;
+};
+
+struct mlx4_ib_qp {
+ struct ib_qp ibqp;
+ struct mlx4_qp mqp;
+ struct mlx4_buf buf;
+
+ struct mlx4_ib_db db;
+ struct mlx4_ib_wq rq;
+
+ u32 doorbell_qpn;
+ __be32 sq_signal_bits;
+ struct mlx4_ib_wq sq;
+
+ struct ib_umem *umem;
+ struct mlx4_mtt mtt;
+ int buf_size;
+ struct mutex mutex;
+ u8 port;
+ u8 alt_port;
+ u8 atomic_rd_en;
+ u8 resp_depth;
+ u8 state;
+};
+
+struct mlx4_ib_srq {
+ struct ib_srq ibsrq;
+ struct mlx4_srq msrq;
+ struct mlx4_buf buf;
+ struct mlx4_ib_db db;
+ u64 *wrid;
+ spinlock_t lock;
+ int head;
+ int tail;
+ u16 wqe_ctr;
+ struct ib_umem *umem;
+ struct mlx4_mtt mtt;
+ struct mutex mutex;
+};
+
+struct mlx4_ib_ah {
+ struct ib_ah ibah;
+ struct mlx4_av av;
+};
+
+struct mlx4_ib_dev {
+ struct ib_device ib_dev;
+ struct mlx4_dev *dev;
+ void __iomem *uar_map;
+
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
+
+ struct mlx4_uar priv_uar;
+ u32 priv_pdn;
+ MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
+
+ struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
+ struct ib_ah *sm_ah[MLX4_MAX_PORTS];
+ spinlock_t sm_lock;
+
+ struct mutex cap_mask_mutex;
+};
+
+static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
+}
+
+static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
+}
+
+static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct mlx4_ib_pd, ibpd);
+}
+
+static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct mlx4_ib_cq, ibcq);
+}
+
+static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
+{
+ return container_of(mcq, struct mlx4_ib_cq, mcq);
+}
+
+static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct mlx4_ib_mr, ibmr);
+}
+
+static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct mlx4_ib_qp, ibqp);
+}
+
+static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
+{
+ return container_of(mqp, struct mlx4_ib_qp, mqp);
+}
+
+static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
+}
+
+static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
+{
+ return container_of(msrq, struct mlx4_ib_srq, msrq);
+}
+
+static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct mlx4_ib_ah, ibah);
+}
+
+int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
+void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
+int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
+ struct mlx4_ib_db *db);
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db);
+
+struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
+int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
+ struct ib_umem *umem);
+struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int mlx4_ib_dereg_mr(struct ib_mr *mr);
+
+struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int mlx4_ib_destroy_cq(struct ib_cq *cq);
+int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
+void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
+
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
+int mlx4_ib_destroy_ah(struct ib_ah *ah);
+
+struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int mlx4_ib_destroy_srq(struct ib_srq *srq);
+void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx4_ib_destroy_qp(struct ib_qp *qp);
+int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
+ int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ void *in_mad, void *response_mad);
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad);
+int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
+void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
+
+static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
+{
+ return !!(ah->av.g_slid & 0x80);
+}
+
+#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
new file mode 100644
index 00000000000..85ae906f1d1
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx4_ib.h"
+
+static u32 convert_access(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) |
+ MLX4_PERM_LOCAL_READ;
+}
+
+struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct mlx4_ib_mr *mr;
+ int err;
+
+ mr = kmalloc(sizeof *mr, GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
+ ~0ull, convert_access(acc), 0, 0, &mr->mmr);
+ if (err)
+ goto err_free;
+
+ err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
+ if (err)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_mr:
+ mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+
+err_free:
+ kfree(mr);
+
+ return ERR_PTR(err);
+}
+
+int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
+ struct ib_umem *umem)
+{
+ u64 *pages;
+ struct ib_umem_chunk *chunk;
+ int i, j, k;
+ int n;
+ int len;
+ int err = 0;
+
+ pages = (u64 *) __get_free_page(GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ i = n = 0;
+
+ list_for_each_entry(chunk, &umem->chunk_list, list)
+ for (j = 0; j < chunk->nmap; ++j) {
+ len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = sg_dma_address(&chunk->page_list[j]) +
+ umem->page_size * k;
+ /*
+ * Be friendly to WRITE_MTT firmware
+ * command, and pass it chunks of
+ * appropriate size.
+ */
+ if (i == PAGE_SIZE / sizeof (u64) - 2) {
+ err = mlx4_write_mtt(dev->dev, mtt, n,
+ i, pages);
+ if (err)
+ goto out;
+ n += i;
+ i = 0;
+ }
+ }
+ }
+
+ if (i)
+ err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
+
+out:
+ free_page((unsigned long) pages);
+ return err;
+}
+
+struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ struct mlx4_ib_mr *mr;
+ int shift;
+ int err;
+ int n;
+
+ mr = kmalloc(sizeof *mr, GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ goto err_free;
+ }
+
+ n = ib_umem_page_count(mr->umem);
+ shift = ilog2(mr->umem->page_size);
+
+ err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
+ convert_access(access_flags), n, shift, &mr->mmr);
+ if (err)
+ goto err_umem;
+
+ err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
+ if (err)
+ goto err_mr;
+
+ err = mlx4_mr_enable(dev->dev, &mr->mmr);
+ if (err)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+
+ return &mr->ibmr;
+
+err_mr:
+ mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
+
+err_umem:
+ ib_umem_release(mr->umem);
+
+err_free:
+ kfree(mr);
+
+ return ERR_PTR(err);
+}
+
+int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct mlx4_ib_mr *mr = to_mmr(ibmr);
+
+ mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+ kfree(mr);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
new file mode 100644
index 00000000000..5cd70690845
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -0,0 +1,1294 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_cache.h>
+#include <rdma/ib_pack.h>
+
+#include <linux/mlx4/qp.h>
+
+#include "mlx4_ib.h"
+#include "user.h"
+
+enum {
+ MLX4_IB_ACK_REQ_FREQ = 8,
+};
+
+enum {
+ MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83,
+ MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f
+};
+
+enum {
+ /*
+ * Largest possible UD header: send with GRH and immediate data.
+ */
+ MLX4_IB_UD_HEADER_SIZE = 72
+};
+
+struct mlx4_ib_sqp {
+ struct mlx4_ib_qp qp;
+ int pkey_index;
+ u32 qkey;
+ u32 send_psn;
+ struct ib_ud_header ud_header;
+ u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
+};
+
+static const __be32 mlx4_ib_opcode[] = {
+ [IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+ [IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+ [IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+ [IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+ [IB_WR_RDMA_READ] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+ [IB_WR_ATOMIC_CMP_AND_SWP] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+};
+
+static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
+{
+ return container_of(mqp, struct mlx4_ib_sqp, qp);
+}
+
+static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+ return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
+ qp->mqp.qpn <= dev->dev->caps.sqp_start + 3;
+}
+
+static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+ return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
+ qp->mqp.qpn <= dev->dev->caps.sqp_start + 1;
+}
+
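+/*
+ * A QP buffer is either a single direct allocation or a list of pages;
+ * translate a byte offset into the corresponding kernel address.
+ */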
+static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
+{
+ if (qp->buf.nbufs == 1)
+ return qp->buf.u.direct.buf + offset;
+ else
+ return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf +
+ (offset & (PAGE_SIZE - 1));
+}
+
+static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
+{
+ return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
+}
+
+static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
+{
+ return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
+}
+
+static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
+{
+ struct ib_event event;
+ struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+
+ if (type == MLX4_EVENT_TYPE_PATH_MIG)
+ to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
+
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ switch (type) {
+ case MLX4_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case MLX4_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case MLX4_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+ "on QP %06x\n", type, qp->qpn);
+ return;
+ }
+
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
+static int send_wqe_overhead(enum ib_qp_type type)
+{
+ /*
+ * UD WQEs must have a datagram segment.
+ * RC and UC WQEs might have a remote address segment.
+ * MLX WQEs need two extra inline data segments (for the UD
+ * header and space for the ICRC).
+ */
+ switch (type) {
+ case IB_QPT_UD:
+ return sizeof (struct mlx4_wqe_ctrl_seg) +
+ sizeof (struct mlx4_wqe_datagram_seg);
+ case IB_QPT_UC:
+ return sizeof (struct mlx4_wqe_ctrl_seg) +
+ sizeof (struct mlx4_wqe_raddr_seg);
+ case IB_QPT_RC:
+ return sizeof (struct mlx4_wqe_ctrl_seg) +
+ sizeof (struct mlx4_wqe_atomic_seg) +
+ sizeof (struct mlx4_wqe_raddr_seg);
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ return sizeof (struct mlx4_wqe_ctrl_seg) +
+ ALIGN(MLX4_IB_UD_HEADER_SIZE +
+ sizeof (struct mlx4_wqe_inline_seg),
+ sizeof (struct mlx4_wqe_data_seg)) +
+ ALIGN(4 +
+ sizeof (struct mlx4_wqe_inline_seg),
+ sizeof (struct mlx4_wqe_data_seg));
+ default:
+ return sizeof (struct mlx4_wqe_ctrl_seg);
+ }
+}
+
+static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
+ enum ib_qp_type type, struct mlx4_ib_qp *qp)
+{
+ /* Sanity check QP size before proceeding */
+ if (cap->max_send_wr > dev->dev->caps.max_wqes ||
+ cap->max_recv_wr > dev->dev->caps.max_wqes ||
+ cap->max_send_sge > dev->dev->caps.max_sq_sg ||
+ cap->max_recv_sge > dev->dev->caps.max_rq_sg ||
+ cap->max_inline_data + send_wqe_overhead(type) +
+ sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
+ return -EINVAL;
+
+ /*
+ * For MLX transport we need 2 extra S/G entries:
+ * one for the header and one for the checksum at the end
+ */
+ if ((type == IB_QPT_SMI || type == IB_QPT_GSI) &&
+ cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
+ return -EINVAL;
+
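+ /*
+ * Round queue depths up to powers of two and WQE strides to
+ * power-of-two byte sizes so ring indexing reduces to shifts and
+ * masks.
+ */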
+ qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
+ qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 0;
+
+ qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
+ sizeof (struct mlx4_wqe_data_seg)));
+ qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+
+ qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
+ sizeof (struct mlx4_wqe_data_seg),
+ cap->max_inline_data +
+ sizeof (struct mlx4_wqe_inline_seg)) +
+ send_wqe_overhead(type)));
+ qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
+ sizeof (struct mlx4_wqe_data_seg);
+
+ qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) +
+ (qp->sq.max << qp->sq.wqe_shift);
+ if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
+ qp->rq.offset = 0;
+ qp->sq.offset = qp->rq.max << qp->rq.wqe_shift;
+ } else {
+ qp->rq.offset = qp->sq.max << qp->sq.wqe_shift;
+ qp->sq.offset = 0;
+ }
+
+ cap->max_send_wr = qp->sq.max;
+ cap->max_recv_wr = qp->rq.max;
+ cap->max_send_sge = qp->sq.max_gs;
+ cap->max_recv_sge = qp->rq.max_gs;
+ cap->max_inline_data = (1 << qp->sq.wqe_shift) - send_wqe_overhead(type) -
+ sizeof (struct mlx4_wqe_inline_seg);
+
+ return 0;
+}
+
+static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
+{
+ struct mlx4_wqe_ctrl_seg *ctrl;
+ int err;
+ int i;
+
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ qp->state = IB_QPS_RESET;
+ qp->atomic_rd_en = 0;
+ qp->resp_depth = 0;
+
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+
+ err = set_qp_size(dev, &init_attr->cap, init_attr->qp_type, qp);
+ if (err)
+ goto err;
+
+ if (pd->uobject) {
+ struct mlx4_ib_create_qp ucmd;
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
+ qp->buf_size, 0);
+ if (IS_ERR(qp->umem)) {
+ err = PTR_ERR(qp->umem);
+ goto err;
+ }
+
+ err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
+ ilog2(qp->umem->page_size), &qp->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
+ if (err)
+ goto err_mtt;
+
+ err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+ ucmd.db_addr, &qp->db);
+ if (err)
+ goto err_mtt;
+ } else {
+ err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+ if (err)
+ goto err;
+
+ *qp->db.db = 0;
+
+ if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
+ err = -ENOMEM;
+ goto err_db;
+ }
+
+ err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
+ &qp->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
+ if (err)
+ goto err_mtt;
+
+ for (i = 0; i < qp->sq.max; ++i) {
+ ctrl = get_send_wqe(qp, i);
+ ctrl->owner_opcode = cpu_to_be32(1 << 31);
+ }
+
+ qp->sq.wrid = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
+ qp->rq.wrid = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
+
+ if (!qp->sq.wrid || !qp->rq.wrid) {
+ err = -ENOMEM;
+ goto err_wrid;
+ }
+
+ /* We don't support inline sends for kernel QPs (yet) */
+ init_attr->cap.max_inline_data = 0;
+ }
+
+ err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
+ if (err)
+ goto err_wrid;
+
+ /*
+ * Hardware wants QPN written in big-endian order (after
+ * shifting) for send doorbell. Precompute this value to save
+ * a little bit when posting sends.
+ */
+ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
+ else
+ qp->sq_signal_bits = 0;
+
+ qp->mqp.event = mlx4_ib_qp_event;
+
+ return 0;
+
+err_wrid:
+ if (pd->uobject)
+ mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
+ else {
+ kfree(qp->sq.wrid);
+ kfree(qp->rq.wrid);
+ }
+
+err_mtt:
+ mlx4_mtt_cleanup(dev->dev, &qp->mtt);
+
+err_buf:
+ if (pd->uobject)
+ ib_umem_release(qp->umem);
+ else
+ mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
+
+err_db:
+ if (!pd->uobject)
+ mlx4_ib_db_free(dev, &qp->db);
+
+err:
+ return err;
+}
+
+static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET: return MLX4_QP_STATE_RST;
+ case IB_QPS_INIT: return MLX4_QP_STATE_INIT;
+ case IB_QPS_RTR: return MLX4_QP_STATE_RTR;
+ case IB_QPS_RTS: return MLX4_QP_STATE_RTS;
+ case IB_QPS_SQD: return MLX4_QP_STATE_SQD;
+ case IB_QPS_SQE: return MLX4_QP_STATE_SQER;
+ case IB_QPS_ERR: return MLX4_QP_STATE_ERR;
+ default: return -1;
+ }
+}
+
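+/*
+ * Take the two CQ locks in a fixed order (lower CQN first) so that QPs
+ * sharing the same pair of CQs in opposite roles cannot deadlock.
+ */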
+static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+{
+ if (send_cq == recv_cq)
+ spin_lock_irq(&send_cq->lock);
+ else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ spin_lock_irq(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irq(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
+static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+{
+ if (send_cq == recv_cq)
+ spin_unlock_irq(&send_cq->lock);
+ else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ spin_unlock(&recv_cq->lock);
+ spin_unlock_irq(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock_irq(&recv_cq->lock);
+ }
+}
+
+static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
+ int is_user)
+{
+ struct mlx4_ib_cq *send_cq, *recv_cq;
+
+ if (qp->state != IB_QPS_RESET)
+ if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
+ MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
+ printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
+ qp->mqp.qpn);
+
+ send_cq = to_mcq(qp->ibqp.send_cq);
+ recv_cq = to_mcq(qp->ibqp.recv_cq);
+
+ mlx4_ib_lock_cqs(send_cq, recv_cq);
+
+ if (!is_user) {
+ __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
+ qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+ if (send_cq != recv_cq)
+ __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+ }
+
+ mlx4_qp_remove(dev->dev, &qp->mqp);
+
+ mlx4_ib_unlock_cqs(send_cq, recv_cq);
+
+ mlx4_qp_free(dev->dev, &qp->mqp);
+ mlx4_mtt_cleanup(dev->dev, &qp->mtt);
+
+ if (is_user) {
+ mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
+ &qp->db);
+ ib_umem_release(qp->umem);
+ } else {
+ kfree(qp->sq.wrid);
+ kfree(qp->rq.wrid);
+ mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
+ mlx4_ib_db_free(dev, &qp->db);
+ }
+}
+
+struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ struct mlx4_ib_sqp *sqp;
+ struct mlx4_ib_qp *qp;
+ int err;
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ {
+ qp = kmalloc(sizeof *qp, GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
+ if (err) {
+ kfree(qp);
+ return ERR_PTR(err);
+ }
+
+ qp->ibqp.qp_num = qp->mqp.qpn;
+
+ break;
+ }
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ {
+ /* Userspace is not allowed to create special QPs: */
+ if (pd->uobject)
+ return ERR_PTR(-EINVAL);
+
+ sqp = kmalloc(sizeof *sqp, GFP_KERNEL);
+ if (!sqp)
+ return ERR_PTR(-ENOMEM);
+
+ qp = &sqp->qp;
+
+ err = create_qp_common(dev, pd, init_attr, udata,
+ dev->dev->caps.sqp_start +
+ (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
+ init_attr->port_num - 1,
+ qp);
+ if (err) {
+ kfree(sqp);
+ return ERR_PTR(err);
+ }
+
+ qp->port = init_attr->port_num;
+ qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
+
+ break;
+ }
+ default:
+ /* Don't support raw QPs */
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &qp->ibqp;
+}
+
+int mlx4_ib_destroy_qp(struct ib_qp *qp)
+{
+ struct mlx4_ib_dev *dev = to_mdev(qp->device);
+ struct mlx4_ib_qp *mqp = to_mqp(qp);
+
+ if (is_qp0(dev, mqp))
+ mlx4_CLOSE_PORT(dev->dev, mqp->port);
+
+ destroy_qp_common(dev, mqp, !!qp->pd->uobject);
+
+ if (is_sqp(dev, mqp))
+ kfree(to_msqp(mqp));
+ else
+ kfree(mqp);
+
+ return 0;
+}
+
+static void init_port(struct mlx4_ib_dev *dev, int port)
+{
+ struct mlx4_init_port_param param;
+ int err;
+
+ memset(&param, 0, sizeof param);
+
+ param.port_width_cap = dev->dev->caps.port_width_cap;
+ param.vl_cap = dev->dev->caps.vl_cap;
+ param.mtu = ib_mtu_enum_to_int(dev->dev->caps.mtu_cap);
+ param.max_gid = dev->dev->caps.gid_table_len;
+ param.max_pkey = dev->dev->caps.pkey_table_len;
+
+ err = mlx4_INIT_PORT(dev->dev, &param, port);
+ if (err)
+ printk(KERN_WARNING "INIT_PORT failed, return code %d.\n", err);
+}
+
+static int to_mlx4_st(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_RC: return MLX4_QP_ST_RC;
+ case IB_QPT_UC: return MLX4_QP_ST_UC;
+ case IB_QPT_UD: return MLX4_QP_ST_UD;
+ case IB_QPT_SMI:
+ case IB_QPT_GSI: return MLX4_QP_ST_MLX;
+ default: return -1;
+ }
+}
+
+static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ u8 dest_rd_atomic;
+ u32 access_flags;
+ u32 hw_access_flags = 0;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ dest_rd_atomic = attr->max_dest_rd_atomic;
+ else
+ dest_rd_atomic = qp->resp_depth;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ access_flags = attr->qp_access_flags;
+ else
+ access_flags = qp->atomic_rd_en;
+
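+	/*
+	 * With no responder resources, incoming RDMA reads and
+	 * atomics cannot be serviced, so keep only the remote-write
+	 * permission bit.
+	 */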
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ if (access_flags & IB_ACCESS_REMOTE_READ)
+ hw_access_flags |= MLX4_QP_BIT_RRE;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ hw_access_flags |= MLX4_QP_BIT_RAE;
+ if (access_flags & IB_ACCESS_REMOTE_WRITE)
+ hw_access_flags |= MLX4_QP_BIT_RWE;
+
+ return cpu_to_be32(hw_access_flags);
+}
+
+static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ sqp->pkey_index = attr->pkey_index;
+ if (attr_mask & IB_QP_QKEY)
+ sqp->qkey = attr->qkey;
+ if (attr_mask & IB_QP_SQ_PSN)
+ sqp->send_psn = attr->sq_psn;
+}
+
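+/* Bit 6 of sched_queue selects the port: 0 for port 1, 1 for port 2. */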
+static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
+{
+ path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
+}
+
+static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
+ struct mlx4_qp_path *path, u8 port)
+{
+ path->grh_mylmc = ah->src_path_bits & 0x7f;
+ path->rlid = cpu_to_be16(ah->dlid);
+ if (ah->static_rate) {
+ path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
+ while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+ !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
+ --path->static_rate;
+ } else
+ path->static_rate = 0;
+ path->counter_index = 0xff;
+
+ if (ah->ah_flags & IB_AH_GRH) {
+ if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len) {
+ printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+ ah->grh.sgid_index, dev->dev->caps.gid_table_len - 1);
+ return -1;
+ }
+
+ path->grh_mylmc |= 1 << 7;
+ path->mgid_index = ah->grh.sgid_index;
+ path->hop_limit = ah->grh.hop_limit;
+ path->tclass_flowlabel =
+ cpu_to_be32((ah->grh.traffic_class << 20) |
+ (ah->grh.flow_label));
+ memcpy(path->rgid, ah->grh.dgid.raw, 16);
+ }
+
+ path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+ ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+
+ return 0;
+}
+
+int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ struct mlx4_qp_context *context;
+ enum mlx4_qp_optpar optpar = 0;
+ enum ib_qp_state cur_state, new_state;
+ int sqd_event;
+ int err = -EINVAL;
+
+ context = kzalloc(sizeof *context, GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ mutex_lock(&qp->mutex);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+ goto out;
+
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= dev->dev->caps.pkey_table_len) {
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > 1 << dev->dev->caps.max_qp_dest_rdma) {
+ goto out;
+ }
+
+ context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
+ (to_mlx4_st(ibqp->qp_type) << 16));
+ context->flags |= cpu_to_be32(1 << 8); /* DE? */
+
+ if (!(attr_mask & IB_QP_PATH_MIG_STATE))
+ context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
+ else {
+ optpar |= MLX4_QP_OPTPAR_PM_STATE;
+ switch (attr->path_mig_state) {
+ case IB_MIG_MIGRATED:
+ context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
+ break;
+ case IB_MIG_REARM:
+ context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
+ break;
+ case IB_MIG_ARMED:
+ context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
+ break;
+ }
+ }
+
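+	/*
+	 * mtu_msgmax packs the MTU in the upper bits and log2 of the
+	 * maximum message size in the low five bits: UD and special
+	 * QPs are fixed at a 4K MTU with 2^11-byte messages, while
+	 * other QP types use the requested path MTU with the full
+	 * 2^31-byte message limit.
+	 */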
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
+ ibqp->qp_type == IB_QPT_UD)
+ context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+ else if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
+ printk(KERN_ERR "path MTU (%u) is invalid\n",
+ attr->path_mtu);
+			err = -EINVAL;
+			goto out;
+ }
+ context->mtu_msgmax = (attr->path_mtu << 5) | 31;
+ }
+
+ if (qp->rq.max)
+ context->rq_size_stride = ilog2(qp->rq.max) << 3;
+ context->rq_size_stride |= qp->rq.wqe_shift - 4;
+
+ if (qp->sq.max)
+ context->sq_size_stride = ilog2(qp->sq.max) << 3;
+ context->sq_size_stride |= qp->sq.wqe_shift - 4;
+
+ if (qp->ibqp.uobject)
+ context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
+ else
+ context->usr_page = cpu_to_be32(dev->priv_uar.index);
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
+
+ if (attr_mask & IB_QP_PORT) {
+ if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
+ !(attr_mask & IB_QP_AV)) {
+ mlx4_set_sched(&context->pri_path, attr->port_num);
+ optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
+ }
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ context->pri_path.pkey_index = attr->pkey_index;
+ optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
+ optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
+ }
+
+ if (attr_mask & IB_QP_AV) {
+ if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
+ attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
+ MLX4_QP_OPTPAR_SCHED_QUEUE);
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ context->pri_path.ackto = attr->timeout << 3;
+ optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+		if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len)
+			goto out;
+
+		if (attr->alt_port_num == 0 ||
+		    attr->alt_port_num > dev->dev->caps.num_ports)
+			goto out;
+
+		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
+				  attr->alt_port_num))
+			goto out;
+
+ context->alt_path.pkey_index = attr->alt_pkey_index;
+ context->alt_path.ackto = attr->alt_timeout << 3;
+ optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
+ }
+
+ context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
+ context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
+ optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
+ }
+
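+	/*
+	 * The HCA encodes the number of outstanding RDMA reads/atomics
+	 * as a power-of-two exponent; fls(n - 1) is ceil(log2(n)), so
+	 * e.g. max_rd_atomic == 5 yields fls(4) == 3 (8 slots).
+	 */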
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic)
+ context->params1 |=
+ cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
+ optpar |= MLX4_QP_OPTPAR_SRA_MAX;
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN)
+ context->next_send_psn = cpu_to_be32(attr->sq_psn);
+
+ context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn);
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic)
+ context->params2 |=
+ cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
+ optpar |= MLX4_QP_OPTPAR_RRA_MAX;
+ }
+
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+ context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
+ optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
+ }
+
+ if (ibqp->srq)
+ context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
+ optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
+ }
+ if (attr_mask & IB_QP_RQ_PSN)
+ context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
+
+ context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn);
+
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey = cpu_to_be32(attr->qkey);
+ optpar |= MLX4_QP_OPTPAR_Q_KEY;
+ }
+
+ if (ibqp->srq)
+ context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ context->db_rec_addr = cpu_to_be64(qp->db.dma);
+
+ if (cur_state == IB_QPS_INIT &&
+ new_state == IB_QPS_RTR &&
+ (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
+ ibqp->qp_type == IB_QPT_UD)) {
+ context->pri_path.sched_queue = (qp->port - 1) << 6;
+ if (is_qp0(dev, qp))
+ context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
+ else
+ context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
+ }
+
+ if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
+ attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
+ sqd_event = 1;
+ else
+ sqd_event = 0;
+
+ err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
+ to_mlx4_state(new_state), context, optpar,
+ sqd_event, &qp->mqp);
+ if (err)
+ goto out;
+
+ qp->state = new_state;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->atomic_rd_en = attr->qp_access_flags;
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT)
+ qp->port = attr->port_num;
+ if (attr_mask & IB_QP_ALT_PATH)
+ qp->alt_port = attr->alt_port_num;
+
+ if (is_sqp(dev, qp))
+ store_sqp_attrs(to_msqp(qp), attr, attr_mask);
+
+ /*
+ * If we moved QP0 to RTR, bring the IB link up; if we moved
+ * QP0 to RESET or ERROR, bring the link back down.
+ */
+ if (is_qp0(dev, qp)) {
+ if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
+ init_port(dev, qp->port);
+
+ if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
+ (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
+ mlx4_CLOSE_PORT(dev->dev, qp->port);
+ }
+
+ /*
+ * If we moved a kernel QP to RESET, clean up all old CQ
+ * entries and reinitialize the QP.
+ */
+ if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+ mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn,
+				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ if (ibqp->send_cq != ibqp->recv_cq)
+ mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL);
+
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ *qp->db.db = 0;
+ }
+
+out:
+ mutex_unlock(&qp->mutex);
+ kfree(context);
+ return err;
+}
+
+static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
+ void *wqe)
+{
+ struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
+ struct mlx4_wqe_mlx_seg *mlx = wqe;
+ struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
+ struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+ u16 pkey;
+ int send_size;
+ int header_size;
+ int i;
+
+ send_size = 0;
+ for (i = 0; i < wr->num_sge; ++i)
+ send_size += wr->sg_list[i].length;
+
+ ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header);
+
+ sqp->ud_header.lrh.service_level =
+ be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
+ sqp->ud_header.lrh.destination_lid = ah->av.dlid;
+ sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.g_slid & 0x7f);
+ if (mlx4_ib_ah_grh_present(ah)) {
+ sqp->ud_header.grh.traffic_class =
+ (be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
+ sqp->ud_header.grh.flow_label =
+ ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+ ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
+ ah->av.gid_index, &sqp->ud_header.grh.source_gid);
+ memcpy(sqp->ud_header.grh.destination_gid.raw,
+ ah->av.dgid, 16);
+ }
+
+ mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
+ mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
+ (sqp->ud_header.lrh.destination_lid ==
+ IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
+ (sqp->ud_header.lrh.service_level << 8));
+ mlx->rlid = sqp->ud_header.lrh.destination_lid;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ sqp->ud_header.immediate_present = 0;
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+ sqp->ud_header.immediate_present = 1;
+ sqp->ud_header.immediate_data = wr->imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+ sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
+ sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+ if (!sqp->qp.ibqp.qp_num)
+ ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+ else
+ ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
+ sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
+ sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+ sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
+ sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
+ sqp->qkey : wr->wr.ud.remote_qkey);
+ sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+
+ header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
+
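+	/* Compile-time disabled debugging aid: change to "if (1)" to
+	 * hexdump each UD header as it is built. */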
+ if (0) {
+ printk(KERN_ERR "built UD header of size %d:\n", header_size);
+ for (i = 0; i < header_size / 4; ++i) {
+ if (i % 8 == 0)
+ printk(" [%02x] ", i * 4);
+ printk(" %08x",
+ be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
+ if ((i + 1) % 8 == 0)
+ printk("\n");
+ }
+ printk("\n");
+ }
+
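+	/* Bit 31 of byte_count marks this segment as inline data,
+	 * as with the inline ICRC trailer added in post_send. */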
+ inl->byte_count = cpu_to_be32(1 << 31 | header_size);
+ memcpy(inl + 1, sqp->header_buf, header_size);
+
+ return ALIGN(sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+}
+
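+/*
+ * Overflow test for a work queue: head only advances under the QP
+ * lock the poster already holds, but tail is advanced by completion
+ * processing, so do a cheap unlocked check first and retest with a
+ * fresh tail under the CQ lock only when the queue looks full.
+ */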
+static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
+{
+ unsigned cur;
+ struct mlx4_ib_cq *cq;
+
+ cur = wq->head - wq->tail;
+ if (likely(cur + nreq < wq->max))
+ return 0;
+
+ cq = to_mcq(ib_cq);
+ spin_lock(&cq->lock);
+ cur = wq->head - wq->tail;
+ spin_unlock(&cq->lock);
+
+ return cur + nreq >= wq->max;
+}
+
+int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ void *wqe;
+ struct mlx4_wqe_ctrl_seg *ctrl;
+ unsigned long flags;
+ int nreq;
+ int err = 0;
+ int ind;
+ int size;
+ int i;
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ ind = qp->sq.head;
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->sq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.max - 1));
+ qp->sq.wrid[ind & (qp->sq.max - 1)] = wr->wr_id;
+
+ ctrl->srcrb_flags =
+ (wr->send_flags & IB_SEND_SIGNALED ?
+ cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
+ (wr->send_flags & IB_SEND_SOLICITED ?
+ cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
+ qp->sq_signal_bits;
+
+ if (wr->opcode == IB_WR_SEND_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ ctrl->imm = wr->imm_data;
+ else
+ ctrl->imm = 0;
+
+ wqe += sizeof *ctrl;
+ size = sizeof *ctrl / 16;
+
+ switch (ibqp->qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ switch (wr->opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ ((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
+ cpu_to_be64(wr->wr.atomic.remote_addr);
+ ((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
+ cpu_to_be32(wr->wr.atomic.rkey);
+ ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
+
+ wqe += sizeof (struct mlx4_wqe_raddr_seg);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
+ cpu_to_be64(wr->wr.atomic.swap);
+ ((struct mlx4_wqe_atomic_seg *) wqe)->compare =
+ cpu_to_be64(wr->wr.atomic.compare_add);
+ } else {
+ ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
+ cpu_to_be64(wr->wr.atomic.compare_add);
+ ((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0;
+ }
+
+ wqe += sizeof (struct mlx4_wqe_atomic_seg);
+ size += (sizeof (struct mlx4_wqe_raddr_seg) +
+ sizeof (struct mlx4_wqe_atomic_seg)) / 16;
+
+ break;
+
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
+ cpu_to_be64(wr->wr.rdma.remote_addr);
+ ((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
+ cpu_to_be32(wr->wr.rdma.rkey);
+ ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
+
+ wqe += sizeof (struct mlx4_wqe_raddr_seg);
+ size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
+
+ break;
+
+ default:
+ /* No extra segments required for sends */
+ break;
+ }
+ break;
+
+ case IB_QPT_UD:
+ memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av,
+ &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
+ ((struct mlx4_wqe_datagram_seg *) wqe)->dqpn =
+ cpu_to_be32(wr->wr.ud.remote_qpn);
+ ((struct mlx4_wqe_datagram_seg *) wqe)->qkey =
+ cpu_to_be32(wr->wr.ud.remote_qkey);
+
+ wqe += sizeof (struct mlx4_wqe_datagram_seg);
+ size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+ break;
+
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ err = build_mlx_header(to_msqp(qp), wr, ctrl);
+ if (err < 0) {
+ *bad_wr = wr;
+ goto out;
+ }
+ wqe += err;
+ size += err / 16;
+
+ err = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ ((struct mlx4_wqe_data_seg *) wqe)->byte_count =
+ cpu_to_be32(wr->sg_list[i].length);
+ ((struct mlx4_wqe_data_seg *) wqe)->lkey =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ ((struct mlx4_wqe_data_seg *) wqe)->addr =
+ cpu_to_be64(wr->sg_list[i].addr);
+
+ wqe += sizeof (struct mlx4_wqe_data_seg);
+ size += sizeof (struct mlx4_wqe_data_seg) / 16;
+ }
+
+ /* Add one more inline data segment for ICRC for MLX sends */
+ if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) {
+ ((struct mlx4_wqe_inline_seg *) wqe)->byte_count =
+ cpu_to_be32((1 << 31) | 4);
+ ((u32 *) wqe)[1] = 0;
+ wqe += sizeof (struct mlx4_wqe_data_seg);
+ size += sizeof (struct mlx4_wqe_data_seg) / 16;
+ }
+
+ ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
+ MLX4_WQE_CTRL_FENCE : 0) | size;
+
+ /*
+ * Make sure descriptor is fully written before
+ * setting ownership bit (because HW can start
+ * executing as soon as we do).
+ */
+ wmb();
+
+		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
+			err = -EINVAL;
+			*bad_wr = wr;
+			goto out;
+		}
+
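+		/*
+		 * Since sq.max is a power of two, "ind & qp->sq.max"
+		 * flips on every pass around the ring, giving the
+		 * alternating hardware ownership bit.
+		 */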
+ ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
+ (ind & qp->sq.max ? cpu_to_be32(1 << 31) : 0);
+
+ ++ind;
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ writel(qp->doorbell_qpn,
+ to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
+
+ /*
+ * Make sure doorbells don't leak out of SQ spinlock
+ * and reach the HCA out of order.
+ */
+ mmiowb();
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return err;
+}
+
+int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ struct mlx4_wqe_data_seg *scat;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ ind = qp->rq.head & (qp->rq.max - 1);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->rq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ scat = get_recv_wqe(qp, ind);
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
+ scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
+ scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
+ }
+
+ if (i < qp->rq.max_gs) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+
+ qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (qp->rq.max - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->rq.head += nreq;
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
new file mode 100644
index 00000000000..42ab4a801d6
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/srq.h>
+
+#include "mlx4_ib.h"
+#include "user.h"
+
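+/*
+ * A small SRQ buffer is a single contiguous allocation; larger
+ * buffers are kept as a list of pages, so index by page and then
+ * by offset within the page.
+ */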
+static void *get_wqe(struct mlx4_ib_srq *srq, int n)
+{
+ int offset = n << srq->msrq.wqe_shift;
+
+ if (srq->buf.nbufs == 1)
+ return srq->buf.u.direct.buf + offset;
+ else
+ return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf +
+ (offset & (PAGE_SIZE - 1));
+}
+
+static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
+{
+ struct ib_event event;
+ struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
+
+ if (ibsrq->event_handler) {
+ event.device = ibsrq->device;
+ event.element.srq = ibsrq;
+ switch (type) {
+ case MLX4_EVENT_TYPE_SRQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ break;
+ case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
+ event.event = IB_EVENT_SRQ_ERR;
+ break;
+ default:
+ printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+ "on SRQ %06x\n", type, srq->srqn);
+ return;
+ }
+
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+ }
+}
+
+struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(pd->device);
+ struct mlx4_ib_srq *srq;
+ struct mlx4_wqe_srq_next_seg *next;
+ int desc_size;
+ int buf_size;
+ int err;
+ int i;
+
+ /* Sanity check SRQ size before proceeding */
+ if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
+ init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
+ return ERR_PTR(-EINVAL);
+
+ srq = kmalloc(sizeof *srq, GFP_KERNEL);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&srq->mutex);
+ spin_lock_init(&srq->lock);
+ srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+ srq->msrq.max_gs = init_attr->attr.max_sge;
+
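+	/*
+	 * Each SRQ WQE is one link segment plus max_gs scatter
+	 * entries, rounded up to a power of two (at least 32 bytes)
+	 * so WQEs can be indexed by wqe_shift.
+	 */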
+ desc_size = max(32UL,
+ roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
+ srq->msrq.max_gs *
+ sizeof (struct mlx4_wqe_data_seg)));
+ srq->msrq.wqe_shift = ilog2(desc_size);
+
+ buf_size = srq->msrq.max * desc_size;
+
+ if (pd->uobject) {
+ struct mlx4_ib_create_srq ucmd;
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+ err = -EFAULT;
+ goto err_srq;
+ }
+
+ srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
+ buf_size, 0);
+ if (IS_ERR(srq->umem)) {
+ err = PTR_ERR(srq->umem);
+ goto err_srq;
+ }
+
+ err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
+ ilog2(srq->umem->page_size), &srq->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
+ if (err)
+ goto err_mtt;
+
+ err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+ ucmd.db_addr, &srq->db);
+ if (err)
+ goto err_mtt;
+ } else {
+ err = mlx4_ib_db_alloc(dev, &srq->db, 0);
+ if (err)
+ goto err_srq;
+
+ *srq->db.db = 0;
+
+ if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+ err = -ENOMEM;
+ goto err_db;
+ }
+
+ srq->head = 0;
+ srq->tail = srq->msrq.max - 1;
+ srq->wqe_ctr = 0;
+
+ for (i = 0; i < srq->msrq.max; ++i) {
+ next = get_wqe(srq, i);
+ next->next_wqe_index =
+ cpu_to_be16((i + 1) & (srq->msrq.max - 1));
+ }
+
+ err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
+ &srq->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
+ if (err)
+ goto err_mtt;
+
+ srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
+ if (!srq->wrid) {
+ err = -ENOMEM;
+ goto err_mtt;
+ }
+ }
+
+ err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
+ srq->db.dma, &srq->msrq);
+ if (err)
+ goto err_wrid;
+
+ srq->msrq.event = mlx4_ib_srq_event;
+
+ if (pd->uobject)
+ if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
+ err = -EFAULT;
+ goto err_wrid;
+ }
+
+ init_attr->attr.max_wr = srq->msrq.max - 1;
+
+ return &srq->ibsrq;
+
+err_wrid:
+ if (pd->uobject)
+ mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
+ else
+ kfree(srq->wrid);
+
+err_mtt:
+ mlx4_mtt_cleanup(dev->dev, &srq->mtt);
+
+err_buf:
+ if (pd->uobject)
+ ib_umem_release(srq->umem);
+ else
+ mlx4_buf_free(dev->dev, buf_size, &srq->buf);
+
+err_db:
+ if (!pd->uobject)
+ mlx4_ib_db_free(dev, &srq->db);
+
+err_srq:
+ kfree(srq);
+
+ return ERR_PTR(err);
+}
+
+int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx4_ib_srq *srq = to_msrq(ibsrq);
+ int ret;
+
+ /* We don't support resizing SRQs (yet?) */
+ if (attr_mask & IB_SRQ_MAX_WR)
+ return -EINVAL;
+
+ if (attr_mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit >= srq->msrq.max)
+ return -EINVAL;
+
+ mutex_lock(&srq->mutex);
+ ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
+ mutex_unlock(&srq->mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx4_ib_destroy_srq(struct ib_srq *srq)
+{
+ struct mlx4_ib_dev *dev = to_mdev(srq->device);
+ struct mlx4_ib_srq *msrq = to_msrq(srq);
+
+ mlx4_srq_free(dev->dev, &msrq->msrq);
+ mlx4_mtt_cleanup(dev->dev, &msrq->mtt);
+
+ if (srq->uobject) {
+ mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
+ ib_umem_release(msrq->umem);
+ } else {
+ kfree(msrq->wrid);
+ mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
+ &msrq->buf);
+ mlx4_ib_db_free(dev, &msrq->db);
+ }
+
+ kfree(msrq);
+
+ return 0;
+}
+
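+/*
+ * SRQ WQEs are kept on a free list threaded through each WQE's
+ * next_wqe_index field; a completed WQE is linked back in at the
+ * tail so it can be handed out again by mlx4_ib_post_srq_recv().
+ */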
+void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
+{
+ struct mlx4_wqe_srq_next_seg *next;
+
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+ next = get_wqe(srq, srq->tail);
+ next->next_wqe_index = cpu_to_be16(wqe_index);
+ srq->tail = wqe_index;
+
+ spin_unlock(&srq->lock);
+}
+
+int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mlx4_ib_srq *srq = to_msrq(ibsrq);
+ struct mlx4_wqe_srq_next_seg *next;
+ struct mlx4_wqe_data_seg *scat;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int i;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ srq->wrid[srq->head] = wr->wr_id;
+
+ next = get_wqe(srq, srq->head);
+ srq->head = be16_to_cpu(next->next_wqe_index);
+ scat = (struct mlx4_wqe_data_seg *) (next + 1);
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
+ scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
+ scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
+ }
+
+ if (i < srq->msrq.max_gs) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+ }
+
+ if (likely(nreq)) {
+ srq->wqe_ctr += nreq;
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *srq->db.db = cpu_to_be32(srq->wqe_ctr);
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
new file mode 100644
index 00000000000..5b8eddc9fa8
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_IB_USER_H
+#define MLX4_IB_USER_H
+
+#include <linux/types.h>
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define MLX4_IB_UVERBS_ABI_VERSION 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+
+struct mlx4_ib_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u16 bf_reg_size;
+ __u16 bf_regs_per_page;
+};
+
+struct mlx4_ib_alloc_pd_resp {
+ __u32 pdn;
+ __u32 reserved;
+};
+
+struct mlx4_ib_create_cq {
+ __u64 buf_addr;
+ __u64 db_addr;
+};
+
+struct mlx4_ib_create_cq_resp {
+ __u32 cqn;
+ __u32 reserved;
+};
+
+struct mlx4_ib_resize_cq {
+ __u64 buf_addr;
+};
+
+struct mlx4_ib_create_srq {
+ __u64 buf_addr;
+ __u64 db_addr;
+};
+
+struct mlx4_ib_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
+struct mlx4_ib_create_qp {
+ __u64 buf_addr;
+ __u64 db_addr;
+};
+
+#endif /* MLX4_IB_USER_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1c05486c3c6..6bcde1cb968 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -37,6 +37,7 @@
*/
#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mm.h>
@@ -908,6 +909,8 @@ static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
return ERR_PTR(err);
}
+ mr->umem = NULL;
+
return &mr->ibmr;
}
@@ -1003,11 +1006,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
}
kfree(page_list);
+ mr->umem = NULL;
+
return &mr->ibmr;
}
-static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
- int acc, struct ib_udata *udata)
+static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(pd->device);
struct ib_umem_chunk *chunk;
@@ -1018,20 +1023,26 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
int err = 0;
int write_mtt_size;
- shift = ffs(region->page_size) - 1;
-
mr = kmalloc(sizeof *mr, GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
+ mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ goto err;
+ }
+
+ shift = ffs(mr->umem->page_size) - 1;
+
n = 0;
- list_for_each_entry(chunk, &region->chunk_list, list)
+ list_for_each_entry(chunk, &mr->umem->chunk_list, list)
n += chunk->nents;
mr->mtt = mthca_alloc_mtt(dev, n);
if (IS_ERR(mr->mtt)) {
err = PTR_ERR(mr->mtt);
- goto err;
+ goto err_umem;
}
pages = (u64 *) __get_free_page(GFP_KERNEL);
@@ -1044,12 +1055,12 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
- list_for_each_entry(chunk, &region->chunk_list, list)
+ list_for_each_entry(chunk, &mr->umem->chunk_list, list)
for (j = 0; j < chunk->nmap; ++j) {
len = sg_dma_len(&chunk->page_list[j]) >> shift;
for (k = 0; k < len; ++k) {
pages[i++] = sg_dma_address(&chunk->page_list[j]) +
- region->page_size * k;
+ mr->umem->page_size * k;
/*
* Be friendly to write_mtt and pass it chunks
* of appropriate size.
@@ -1071,8 +1082,8 @@ mtt_done:
if (err)
goto err_mtt;
- err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base,
- region->length, convert_access(acc), mr);
+ err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
+ convert_access(acc), mr);
if (err)
goto err_mtt;
@@ -1082,6 +1093,9 @@ mtt_done:
err_mtt:
mthca_free_mtt(dev, mr->mtt);
+err_umem:
+ ib_umem_release(mr->umem);
+
err:
kfree(mr);
return ERR_PTR(err);
@@ -1090,8 +1104,12 @@ err:
static int mthca_dereg_mr(struct ib_mr *mr)
{
struct mthca_mr *mmr = to_mmr(mr);
+
mthca_free_mr(to_mdev(mr->device), mmr);
+ if (mmr->umem)
+ ib_umem_release(mmr->umem);
kfree(mmr);
+
return 0;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 1d266ac2e09..262616c8ebb 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -73,6 +73,7 @@ struct mthca_mtt;
struct mthca_mr {
struct ib_mr ibmr;
+ struct ib_umem *umem;
struct mthca_mtt *mtt;
};
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 0e9b69535ad..f814fb3a469 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -3,6 +3,7 @@
#
menu "Input device support"
+ depends on !S390
config INPUT
tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index c90afeea54a..d42fe89cddf 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -3,6 +3,7 @@
#
menu "ISDN subsystem"
+ depends on !S390
config ISDN
tristate "ISDN support"
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c921d6c522f..c92f9d764fc 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -17,7 +17,7 @@ config CAPI_TRACE
help
If you say Y here, the kernelcapi driver can make verbose traces
of CAPI messages. This feature can be enabled/disabled via IOCTL for
- every controler (default disabled).
+ every controller (default disabled).
This will increase the size of the kernelcapi module by 20 KB.
If unsure, say Y.
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
index af3eb9e795b..85784a7ffb2 100644
--- a/drivers/isdn/hardware/eicon/divasync.h
+++ b/drivers/isdn/hardware/eicon/divasync.h
@@ -216,7 +216,7 @@ typedef struct
#define SERIAL_HOOK_RING 0x85
#define SERIAL_HOOK_DETACH 0x8f
unsigned char Flags; /* function refinements */
- /* parameters passed by the the ATTACH request */
+ /* parameters passed by the ATTACH request */
SERIAL_INT_CB InterruptHandler; /* called on each interrupt */
SERIAL_DPC_CB DeferredHandler; /* called on hook state changes */
void *HandlerContext; /* context for both handlers */
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 99e70d4103b..1f18f199338 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -1217,11 +1217,11 @@ usb_init(hfcusb_data * hfc)
/* aux = output, reset off */
write_usb(hfc, HFCUSB_CIRM, 0x10);
- /* set USB_SIZE to match the the wMaxPacketSize for INT or BULK transfers */
+ /* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
write_usb(hfc, HFCUSB_USB_SIZE,
(hfc->packet_size / 8) | ((hfc->packet_size / 8) << 4));
- /* set USB_SIZE_I to match the the wMaxPacketSize for ISO transfers */
+ /* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size);
/* enable PCM/GCI master mode */
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 703cc88d1ef..e8e37d82647 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -2,6 +2,7 @@
# KVM configuration
#
menu "Virtualization"
+ depends on X86
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index c8b8cfa332b..0d892600ff0 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -2889,7 +2889,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
switch (val) {
case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
decache_vcpus_on_cpu(cpu);
@@ -2897,6 +2899,7 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
NULL, 0, 1);
break;
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 80acd08f0e9..87d2046f866 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,5 +1,6 @@
menu "LED devices"
+ depends on HAS_IOMEM
config NEW_LEDS
bool "LED Support"
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index 1d49d2ade55..677c99325be 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -1,5 +1,5 @@
/*
- * drivers/leds/h1940-leds.c
+ * drivers/leds/leds-h1940.c
* Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org>
*
* This file is subject to the terms and conditions of the GNU General Public
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a32c91e27b3..58926da0ae1 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -237,7 +237,7 @@ config PMAC_RACKMETER
tristate "Support for Apple XServe front panel LEDs"
depends on PPC_PMAC
help
- This driver procides some support to control the front panel
+ This driver provides some support to control the front panel
blue LEDs "vu-meter" of the XServer macs.
endif # MACINTOSH_DRIVERS
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 0acf2f7fd9d..c803d2bba65 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -563,7 +563,7 @@ static void media_bay_step(int i)
ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL);
hw.irq = bay->cd_irq;
hw.chipset = ide_pmac;
- bay->cd_index = ide_register_hw(&hw, NULL);
+ bay->cd_index = ide_register_hw(&hw, 0, NULL);
pmu_resume();
}
if (bay->cd_index == -1) {
diff --git a/drivers/mca/mca-bus.c b/drivers/mca/mca-bus.c
index da862e4632d..67b8e9453b1 100644
--- a/drivers/mca/mca-bus.c
+++ b/drivers/mca/mca-bus.c
@@ -47,19 +47,25 @@ static int mca_bus_match (struct device *dev, struct device_driver *drv)
{
struct mca_device *mca_dev = to_mca_device (dev);
struct mca_driver *mca_drv = to_mca_driver (drv);
- const short *mca_ids = mca_drv->id_table;
- int i;
-
- if (!mca_ids)
- return 0;
-
- for(i = 0; mca_ids[i]; i++) {
- if (mca_ids[i] == mca_dev->pos_id) {
- mca_dev->index = i;
- return 1;
+ const unsigned short *mca_ids = mca_drv->id_table;
+ int i = 0;
+
+ if (mca_ids) {
+ for(i = 0; mca_ids[i]; i++) {
+ if (mca_ids[i] == mca_dev->pos_id) {
+ mca_dev->index = i;
+ return 1;
+ }
}
}
-
+	/* If the integrated id is present, treat it as though it were an
+	 * additional id in the id_table (it can't be there, because by
+	 * definition integrated ids overflow a short). */
+ if (mca_drv->integrated_id && mca_dev->pos_id ==
+ mca_drv->integrated_id) {
+ mca_dev->index = i;
+ return 1;
+ }
return 0;
}
diff --git a/drivers/mca/mca-driver.c b/drivers/mca/mca-driver.c
index 2223466b3d8..32cd39bcc71 100644
--- a/drivers/mca/mca-driver.c
+++ b/drivers/mca/mca-driver.c
@@ -36,12 +36,25 @@ int mca_register_driver(struct mca_driver *mca_drv)
mca_drv->driver.bus = &mca_bus_type;
if ((r = driver_register(&mca_drv->driver)) < 0)
return r;
+ mca_drv->integrated_id = 0;
}
return 0;
}
EXPORT_SYMBOL(mca_register_driver);
+int mca_register_driver_integrated(struct mca_driver *mca_driver,
+ int integrated_id)
+{
+ int r = mca_register_driver(mca_driver);
+
+ if (!r)
+ mca_driver->integrated_id = integrated_id;
+
+ return r;
+}
+EXPORT_SYMBOL(mca_register_driver_integrated);
+
void mca_unregister_driver(struct mca_driver *mca_drv)
{
if (MCA_bus)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4540ade6b6b..7df934d6913 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -262,6 +262,15 @@ config DM_MULTIPATH_EMC
---help---
Multipath support for EMC CX/AX series hardware.
+config DM_DELAY
+ tristate "I/O delaying target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM && EXPERIMENTAL
+ ---help---
+ A target that delays reads and/or writes and can send
+ them to different devices. Useful for testing.
+
+ If unsure, say N.
+
endmenu
endif
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 34957a68d92..38754084eac 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
+obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index da4349649f7..c6be88826fa 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -8,17 +8,43 @@
#define DM_BIO_LIST_H
#include <linux/bio.h>
+#include <linux/prefetch.h>
struct bio_list {
struct bio *head;
struct bio *tail;
};
+static inline int bio_list_empty(const struct bio_list *bl)
+{
+ return bl->head == NULL;
+}
+
+#define BIO_LIST_INIT { .head = NULL, .tail = NULL }
+
+#define BIO_LIST(bl) \
+ struct bio_list bl = BIO_LIST_INIT
+
static inline void bio_list_init(struct bio_list *bl)
{
bl->head = bl->tail = NULL;
}
+#define bio_list_for_each(bio, bl) \
+ for (bio = (bl)->head; bio && ({ prefetch(bio->bi_next); 1; }); \
+ bio = bio->bi_next)
+
+static inline unsigned bio_list_size(const struct bio_list *bl)
+{
+ unsigned sz = 0;
+ struct bio *bio;
+
+ bio_list_for_each(bio, bl)
+ sz++;
+
+ return sz;
+}
+
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
bio->bi_next = NULL;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d8121234c34..7b0fcfc9eaa 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
struct crypt_io {
struct dm_target *target;
struct bio *base_bio;
- struct bio *first_clone;
struct work_struct work;
atomic_t pending;
int error;
@@ -107,6 +106,8 @@ struct crypt_config {
static struct kmem_cache *_crypt_io_pool;
+static void clone_init(struct crypt_io *, struct bio *);
+
/*
* Different IV generation algorithms:
*
@@ -120,6 +121,9 @@ static struct kmem_cache *_crypt_io_pool;
* benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
* (needed for LRW-32-AES and possible other narrow block modes)
*
+ * null: the initial vector is always zero. Provides compatibility with
+ * obsolete loop_fish2 devices. Do not use for new devices.
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
@@ -256,6 +260,13 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
return 0;
}
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+ memset(iv, 0, cc->iv_size);
+
+ return 0;
+}
+
static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -272,6 +283,10 @@ static struct crypt_iv_operations crypt_iv_benbi_ops = {
.generator = crypt_iv_benbi_gen
};
+static struct crypt_iv_operations crypt_iv_null_ops = {
+ .generator = crypt_iv_null_gen
+};
+
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
struct scatterlist *in, unsigned int length,
@@ -378,36 +393,21 @@ static int crypt_convert(struct crypt_config *cc,
* This should never violate the device limitations
* May return a smaller bio when running out of pages
*/
-static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
- struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
{
+ struct crypt_config *cc = io->target->private;
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
unsigned int i;
- if (base_bio) {
- clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
- __bio_clone(clone, base_bio);
- } else
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+ clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
if (!clone)
return NULL;
- clone->bi_destructor = dm_crypt_bio_destructor;
-
- /* if the last bio was not complete, continue where that one ended */
- clone->bi_idx = *bio_vec_idx;
- clone->bi_vcnt = *bio_vec_idx;
- clone->bi_size = 0;
- clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
- /* clone->bi_idx pages have already been allocated */
- size -= clone->bi_idx * PAGE_SIZE;
+ clone_init(io, clone);
- for (i = clone->bi_idx; i < nr_iovecs; i++) {
+ for (i = 0; i < nr_iovecs; i++) {
struct bio_vec *bv = bio_iovec_idx(clone, i);
bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -419,7 +419,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
* return a partially allocated bio, the caller will then try
* to allocate additional bios while submitting this partial bio
*/
- if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
+ if (i == (MIN_BIO_PAGES - 1))
gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
bv->bv_offset = 0;
@@ -438,12 +438,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
return NULL;
}
- /*
- * Remember the last bio_vec allocated to be able
- * to correctly continue after the splitting.
- */
- *bio_vec_idx = clone->bi_vcnt;
-
return clone;
}
@@ -495,9 +489,6 @@ static void dec_pending(struct crypt_io *io, int error)
if (!atomic_dec_and_test(&io->pending))
return;
- if (io->first_clone)
- bio_put(io->first_clone);
-
bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
mempool_free(io, cc->io_pool);
@@ -562,6 +553,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
clone->bi_rw = io->base_bio->bi_rw;
+ clone->bi_destructor = dm_crypt_bio_destructor;
}
static void process_read(struct crypt_io *io)
@@ -585,7 +577,6 @@ static void process_read(struct crypt_io *io)
}
clone_init(io, clone);
- clone->bi_destructor = dm_crypt_bio_destructor;
clone->bi_idx = 0;
clone->bi_vcnt = bio_segments(base_bio);
clone->bi_size = base_bio->bi_size;
@@ -604,7 +595,6 @@ static void process_write(struct crypt_io *io)
struct convert_context ctx;
unsigned remaining = base_bio->bi_size;
sector_t sector = base_bio->bi_sector - io->target->begin;
- unsigned bvec_idx = 0;
atomic_inc(&io->pending);
@@ -615,14 +605,14 @@ static void process_write(struct crypt_io *io)
* so repeat the whole process until all the data can be handled.
*/
while (remaining) {
- clone = crypt_alloc_buffer(cc, base_bio->bi_size,
- io->first_clone, &bvec_idx);
+ clone = crypt_alloc_buffer(io, remaining);
if (unlikely(!clone)) {
dec_pending(io, -ENOMEM);
return;
}
ctx.bio_out = clone;
+ ctx.idx_out = 0;
if (unlikely(crypt_convert(cc, &ctx) < 0)) {
crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -631,31 +621,26 @@ static void process_write(struct crypt_io *io)
return;
}
- clone_init(io, clone);
- clone->bi_sector = cc->start + sector;
-
- if (!io->first_clone) {
- /*
- * hold a reference to the first clone, because it
- * holds the bio_vec array and that can't be freed
- * before all other clones are released
- */
- bio_get(clone);
- io->first_clone = clone;
- }
+ /* crypt_convert should have filled the clone bio */
+ BUG_ON(ctx.idx_out < clone->bi_vcnt);
+ clone->bi_sector = cc->start + sector;
remaining -= clone->bi_size;
sector += bio_sectors(clone);
- /* prevent bio_put of first_clone */
+ /* Grab another reference to the io struct
+ * before we kick off the request */
if (remaining)
atomic_inc(&io->pending);
generic_make_request(clone);
+ /* Do not reference clone after this - it
+ * may be gone already. */
+
/* out of memory -> run queues */
if (remaining)
- congestion_wait(bio_data_dir(clone), HZ/100);
+ congestion_wait(WRITE, HZ/100);
}
}
@@ -832,6 +817,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else if (strcmp(ivmode, "benbi") == 0)
cc->iv_gen_ops = &crypt_iv_benbi_ops;
+ else if (strcmp(ivmode, "null") == 0)
+ cc->iv_gen_ops = &crypt_iv_null_ops;
else {
ti->error = "Invalid IV mode";
goto bad2;
@@ -954,10 +941,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
struct crypt_config *cc = ti->private;
struct crypt_io *io;
+ if (bio_barrier(bio))
+ return -EOPNOTSUPP;
+
io = mempool_alloc(cc->io_pool, GFP_NOIO);
io->target = ti;
io->base_bio = bio;
- io->first_clone = NULL;
io->error = io->post_process = 0;
atomic_set(&io->pending, 0);
kcryptd_queue_io(io);
@@ -1057,7 +1046,7 @@ error:
static struct target_type crypt_target = {
.name = "crypt",
- .version= {1, 3, 0},
+ .version= {1, 5, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
new file mode 100644
index 00000000000..52c7cf9e580
--- /dev/null
+++ b/drivers/md/dm-delay.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2005-2007 Red Hat GmbH
+ *
+ * A target that delays reads and/or writes and can send
+ * them to different devices.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+
+#include "dm.h"
+#include "dm-bio-list.h"
+
+#define DM_MSG_PREFIX "delay"
+
+struct delay_c {
+ struct timer_list delay_timer;
+ struct semaphore timer_lock;
+ struct work_struct flush_expired_bios;
+ struct list_head delayed_bios;
+ atomic_t may_delay;
+ mempool_t *delayed_pool;
+
+ struct dm_dev *dev_read;
+ sector_t start_read;
+ unsigned read_delay;
+ unsigned reads;
+
+ struct dm_dev *dev_write;
+ sector_t start_write;
+ unsigned write_delay;
+ unsigned writes;
+};
+
+struct delay_info {
+ struct delay_c *context;
+ struct list_head list;
+ struct bio *bio;
+ unsigned long expires;
+};
+
+static DEFINE_MUTEX(delayed_bios_lock);
+
+static struct workqueue_struct *kdelayd_wq;
+static struct kmem_cache *delayed_cache;
+
+static void handle_delayed_timer(unsigned long data)
+{
+ struct delay_c *dc = (struct delay_c *)data;
+
+ queue_work(kdelayd_wq, &dc->flush_expired_bios);
+}
+
+static void queue_timeout(struct delay_c *dc, unsigned long expires)
+{
+ down(&dc->timer_lock);
+
+ if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
+ mod_timer(&dc->delay_timer, expires);
+
+ up(&dc->timer_lock);
+}
+
+static void flush_bios(struct bio *bio)
+{
+ struct bio *n;
+
+ while (bio) {
+ n = bio->bi_next;
+ bio->bi_next = NULL;
+ generic_make_request(bio);
+ bio = n;
+ }
+}
+
+static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
+{
+ struct delay_info *delayed, *next;
+ unsigned long next_expires = 0;
+ int start_timer = 0;
+ BIO_LIST(flush_bios);
+
+ mutex_lock(&delayed_bios_lock);
+ list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
+ if (flush_all || time_after_eq(jiffies, delayed->expires)) {
+ list_del(&delayed->list);
+ bio_list_add(&flush_bios, delayed->bio);
+ if ((bio_data_dir(delayed->bio) == WRITE))
+ delayed->context->writes--;
+ else
+ delayed->context->reads--;
+ mempool_free(delayed, dc->delayed_pool);
+ continue;
+ }
+
+ if (!start_timer) {
+ start_timer = 1;
+ next_expires = delayed->expires;
+ } else
+ next_expires = min(next_expires, delayed->expires);
+ }
+
+ mutex_unlock(&delayed_bios_lock);
+
+ if (start_timer)
+ queue_timeout(dc, next_expires);
+
+ return bio_list_get(&flush_bios);
+}
+
+static void flush_expired_bios(struct work_struct *work)
+{
+ struct delay_c *dc;
+
+ dc = container_of(work, struct delay_c, flush_expired_bios);
+ flush_bios(flush_delayed_bios(dc, 0));
+}
+
+/*
+ * Mapping parameters:
+ * <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
+ *
+ * With separate write parameters, the first set is only used for reads.
+ * Delays are specified in milliseconds.
+ */
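+/*
+ * Illustrative dmsetup table line (device names and sizes are
+ * arbitrary examples):
+ *
+ *   0 10240 delay /dev/sdb 0 100 /dev/sdc 0 500
+ *
+ * maps a 10240-sector device that reads from /dev/sdb with a
+ * 100 ms delay and writes to /dev/sdc with a 500 ms delay.
+ */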
+static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct delay_c *dc;
+ unsigned long long tmpll;
+
+ if (argc != 3 && argc != 6) {
+ ti->error = "requires exactly 3 or 6 arguments";
+ return -EINVAL;
+ }
+
+ dc = kmalloc(sizeof(*dc), GFP_KERNEL);
+ if (!dc) {
+ ti->error = "Cannot allocate context";
+ return -ENOMEM;
+ }
+
+ dc->reads = dc->writes = 0;
+
+ if (sscanf(argv[1], "%llu", &tmpll) != 1) {
+ ti->error = "Invalid device sector";
+ goto bad;
+ }
+ dc->start_read = tmpll;
+
+ if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
+ ti->error = "Invalid delay";
+ goto bad;
+ }
+
+ if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
+ dm_table_get_mode(ti->table), &dc->dev_read)) {
+ ti->error = "Device lookup failed";
+ goto bad;
+ }
+
+ if (argc == 3) {
+ dc->dev_write = NULL;
+ goto out;
+ }
+
+ if (sscanf(argv[4], "%llu", &tmpll) != 1) {
+ ti->error = "Invalid write device sector";
+ goto bad;
+ }
+ dc->start_write = tmpll;
+
+ if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
+ ti->error = "Invalid write delay";
+ goto bad;
+ }
+
+ if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
+ dm_table_get_mode(ti->table), &dc->dev_write)) {
+ ti->error = "Write device lookup failed";
+ dm_put_device(ti, dc->dev_read);
+ goto bad;
+ }
+
+out:
+ dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
+ if (!dc->delayed_pool) {
+ DMERR("Couldn't create delayed bio pool.");
+ dm_put_device(ti, dc->dev_read);
+ if (dc->dev_write)
+ dm_put_device(ti, dc->dev_write);
+ goto bad;
+ }
+
+ setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
+
+ INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+ INIT_LIST_HEAD(&dc->delayed_bios);
+ init_MUTEX(&dc->timer_lock);
+ atomic_set(&dc->may_delay, 1);
+
+ ti->private = dc;
+ return 0;
+
+bad:
+ kfree(dc);
+ return -EINVAL;
+}
+
+static void delay_dtr(struct dm_target *ti)
+{
+ struct delay_c *dc = ti->private;
+
+ flush_workqueue(kdelayd_wq);
+
+ dm_put_device(ti, dc->dev_read);
+
+ if (dc->dev_write)
+ dm_put_device(ti, dc->dev_write);
+
+ mempool_destroy(dc->delayed_pool);
+ kfree(dc);
+}
+
+static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
+{
+ struct delay_info *delayed;
+ unsigned long expires = 0;
+
+ if (!delay || !atomic_read(&dc->may_delay))
+ return 1;
+
+ delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);
+
+ delayed->context = dc;
+ delayed->bio = bio;
+ delayed->expires = expires = jiffies + msecs_to_jiffies(delay);
+
+ mutex_lock(&delayed_bios_lock);
+
+ if (bio_data_dir(bio) == WRITE)
+ dc->writes++;
+ else
+ dc->reads++;
+
+ list_add_tail(&delayed->list, &dc->delayed_bios);
+
+ mutex_unlock(&delayed_bios_lock);
+
+ queue_timeout(dc, expires);
+
+ return 0;
+}
+
+static void delay_presuspend(struct dm_target *ti)
+{
+ struct delay_c *dc = ti->private;
+
+ atomic_set(&dc->may_delay, 0);
+ del_timer_sync(&dc->delay_timer);
+ flush_bios(flush_delayed_bios(dc, 1));
+}
+
+static void delay_resume(struct dm_target *ti)
+{
+ struct delay_c *dc = ti->private;
+
+ atomic_set(&dc->may_delay, 1);
+}
+
+static int delay_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ struct delay_c *dc = ti->private;
+
+ if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
+ bio->bi_bdev = dc->dev_write->bdev;
+ bio->bi_sector = dc->start_write +
+ (bio->bi_sector - ti->begin);
+
+ return delay_bio(dc, dc->write_delay, bio);
+ }
+
+ bio->bi_bdev = dc->dev_read->bdev;
+ bio->bi_sector = dc->start_read +
+ (bio->bi_sector - ti->begin);
+
+ return delay_bio(dc, dc->read_delay, bio);
+}
+
+static int delay_status(struct dm_target *ti, status_type_t type,
+ char *result, unsigned maxlen)
+{
+ struct delay_c *dc = ti->private;
+ int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%u %u", dc->reads, dc->writes);
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %llu %u", dc->dev_read->name,
+ (unsigned long long) dc->start_read,
+ dc->read_delay);
+ if (dc->dev_write)
+ DMEMIT("%s %llu %u", dc->dev_write->name,
+ (unsigned long long) dc->start_write,
+ dc->write_delay);
+ break;
+ }
+
+ return 0;
+}
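+
+/*
+ * Sample output for the 3-argument example above (hypothetical counts):
+ *   STATUSTYPE_INFO  ("dmsetup status"): "2 0" - delayed reads, writes
+ *   STATUSTYPE_TABLE ("dmsetup table"):  "/dev/sdb1 0 500"
+ */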
+
+static struct target_type delay_target = {
+ .name = "delay",
+ .version = {1, 0, 2},
+ .module = THIS_MODULE,
+ .ctr = delay_ctr,
+ .dtr = delay_dtr,
+ .map = delay_map,
+ .presuspend = delay_presuspend,
+ .resume = delay_resume,
+ .status = delay_status,
+};
+
+static int __init dm_delay_init(void)
+{
+ int r = -ENOMEM;
+
+ kdelayd_wq = create_workqueue("kdelayd");
+ if (!kdelayd_wq) {
+ DMERR("Couldn't start kdelayd");
+ goto bad_queue;
+ }
+
+ delayed_cache = kmem_cache_create("dm-delay",
+ sizeof(struct delay_info),
+ __alignof__(struct delay_info),
+ 0, NULL, NULL);
+ if (!delayed_cache) {
+ DMERR("Couldn't create delayed bio cache.");
+ goto bad_memcache;
+ }
+
+ r = dm_register_target(&delay_target);
+ if (r < 0) {
+ DMERR("register failed %d", r);
+ goto bad_register;
+ }
+
+ return 0;
+
+bad_register:
+ kmem_cache_destroy(delayed_cache);
+bad_memcache:
+ destroy_workqueue(kdelayd_wq);
+bad_queue:
+ return r;
+}
+
+static void __exit dm_delay_exit(void)
+{
+ int r = dm_unregister_target(&delay_target);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+
+ kmem_cache_destroy(delayed_cache);
+ destroy_workqueue(kdelayd_wq);
+}
+
+/* Module hooks */
+module_init(dm_delay_init);
+module_exit(dm_delay_exit);
+
+MODULE_DESCRIPTION(DM_NAME " delay target");
+MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 99cdffa7fbf..07e0a0c84f6 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -1,7 +1,8 @@
/*
- * dm-snapshot.c
+ * dm-exception-store.c
*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
+ * Copyright (C) 2006 Red Hat GmbH
*
* This file is released under the GPL.
*/
@@ -123,6 +124,7 @@ struct pstore {
atomic_t pending_count;
uint32_t callback_count;
struct commit_callback *callbacks;
+ struct dm_io_client *io_client;
};
static inline unsigned int sectors_to_pages(unsigned int sectors)
@@ -159,14 +161,20 @@ static void free_area(struct pstore *ps)
*/
static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
{
- struct io_region where;
- unsigned long bits;
-
- where.bdev = ps->snap->cow->bdev;
- where.sector = ps->snap->chunk_size * chunk;
- where.count = ps->snap->chunk_size;
-
- return dm_io_sync_vm(1, &where, rw, ps->area, &bits);
+ struct io_region where = {
+ .bdev = ps->snap->cow->bdev,
+ .sector = ps->snap->chunk_size * chunk,
+ .count = ps->snap->chunk_size,
+ };
+ struct dm_io_request io_req = {
+ .bi_rw = rw,
+ .mem.type = DM_IO_VMA,
+ .mem.ptr.vma = ps->area,
+ .client = ps->io_client,
+ .notify.fn = NULL,
+ };
+
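+ /* notify.fn is NULL, so this dm_io() call completes synchronously */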
+ return dm_io(&io_req, 1, &where, NULL);
}
/*
@@ -213,17 +221,18 @@ static int read_header(struct pstore *ps, int *new_snapshot)
chunk_size_supplied = 0;
}
- r = dm_io_get(sectors_to_pages(ps->snap->chunk_size));
- if (r)
- return r;
+ ps->io_client =
+ dm_io_client_create(sectors_to_pages(ps->snap->chunk_size));
+ if (IS_ERR(ps->io_client))
+ return PTR_ERR(ps->io_client);
r = alloc_area(ps);
if (r)
- goto bad1;
+ return r;
r = chunk_io(ps, 0, READ);
if (r)
- goto bad2;
+ goto bad;
dh = (struct disk_header *) ps->area;
@@ -235,7 +244,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
DMWARN("Invalid or corrupt snapshot");
r = -ENXIO;
- goto bad2;
+ goto bad;
}
*new_snapshot = 0;
@@ -252,27 +261,22 @@ static int read_header(struct pstore *ps, int *new_snapshot)
(unsigned long long)ps->snap->chunk_size);
/* We had a bogus chunk_size. Fix stuff up. */
- dm_io_put(sectors_to_pages(ps->snap->chunk_size));
free_area(ps);
ps->snap->chunk_size = chunk_size;
ps->snap->chunk_mask = chunk_size - 1;
ps->snap->chunk_shift = ffs(chunk_size) - 1;
- r = dm_io_get(sectors_to_pages(chunk_size));
+ r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
+ ps->io_client);
if (r)
return r;
r = alloc_area(ps);
- if (r)
- goto bad1;
-
- return 0;
+ return r;
-bad2:
+bad:
free_area(ps);
-bad1:
- dm_io_put(sectors_to_pages(ps->snap->chunk_size));
return r;
}
@@ -405,7 +409,7 @@ static void persistent_destroy(struct exception_store *store)
{
struct pstore *ps = get_info(store);
- dm_io_put(sectors_to_pages(ps->snap->chunk_size));
+ dm_io_client_destroy(ps->io_client);
vfree(ps->callbacks);
free_area(ps);
kfree(ps);
diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
index 32eff28e4ad..e0832e6fcf3 100644
--- a/drivers/md/dm-hw-handler.h
+++ b/drivers/md/dm-hw-handler.h
@@ -16,6 +16,7 @@
struct hw_handler_type;
struct hw_handler {
struct hw_handler_type *type;
+ struct mapped_device *md;
void *context;
};
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 8bdc8a87b24..352c6fbeac5 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2006 Red Hat GmbH
*
* This file is released under the GPL.
*/
@@ -12,13 +13,17 @@
#include <linux/sched.h>
#include <linux/slab.h>
-static struct bio_set *_bios;
+struct dm_io_client {
+ mempool_t *pool;
+ struct bio_set *bios;
+};
/* FIXME: can we shrink this ? */
struct io {
unsigned long error;
atomic_t count;
struct task_struct *sleeper;
+ struct dm_io_client *client;
io_notify_fn callback;
void *context;
};
@@ -26,63 +31,58 @@ struct io {
/*
* io contexts are only dynamically allocated for asynchronous
* io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as buffer heads ! (FIXME:
- * must reduce this).
+ * have the same number of io contexts as bios! (FIXME: must reduce this).
*/
-static unsigned _num_ios;
-static mempool_t *_io_pool;
static unsigned int pages_to_ios(unsigned int pages)
{
return 4 * pages; /* too many ? */
}
-static int resize_pool(unsigned int new_ios)
+/*
+ * Create a client with mempool and bioset.
+ */
+struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
- int r = 0;
-
- if (_io_pool) {
- if (new_ios == 0) {
- /* free off the pool */
- mempool_destroy(_io_pool);
- _io_pool = NULL;
- bioset_free(_bios);
-
- } else {
- /* resize the pool */
- r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
- }
+ unsigned ios = pages_to_ios(num_pages);
+ struct dm_io_client *client;
- } else {
- /* create new pool */
- _io_pool = mempool_create_kmalloc_pool(new_ios,
- sizeof(struct io));
- if (!_io_pool)
- return -ENOMEM;
-
- _bios = bioset_create(16, 16);
- if (!_bios) {
- mempool_destroy(_io_pool);
- _io_pool = NULL;
- return -ENOMEM;
- }
- }
+ client = kmalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
+ if (!client->pool)
+ goto bad;
- if (!r)
- _num_ios = new_ios;
+ client->bios = bioset_create(16, 16);
+ if (!client->bios)
+ goto bad;
- return r;
+ return client;
+
+ bad:
+ if (client->pool)
+ mempool_destroy(client->pool);
+ kfree(client);
+ return ERR_PTR(-ENOMEM);
}
+EXPORT_SYMBOL(dm_io_client_create);
-int dm_io_get(unsigned int num_pages)
+int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
- return resize_pool(_num_ios + pages_to_ios(num_pages));
+ return mempool_resize(client->pool, pages_to_ios(num_pages),
+ GFP_KERNEL);
}
+EXPORT_SYMBOL(dm_io_client_resize);
-void dm_io_put(unsigned int num_pages)
+void dm_io_client_destroy(struct dm_io_client *client)
{
- resize_pool(_num_ios - pages_to_ios(num_pages));
+ mempool_destroy(client->pool);
+ bioset_free(client->bios);
+ kfree(client);
}
+EXPORT_SYMBOL(dm_io_client_destroy);
/*-----------------------------------------------------------------
* We need to keep track of which region a bio is doing io for.
@@ -118,7 +118,7 @@ static void dec_count(struct io *io, unsigned int region, int error)
io_notify_fn fn = io->callback;
void *context = io->context;
- mempool_free(io, _io_pool);
+ mempool_free(io, io->client->pool);
fn(r, context);
}
}
@@ -126,7 +126,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
static int endio(struct bio *bio, unsigned int done, int error)
{
- struct io *io = (struct io *) bio->bi_private;
+ struct io *io;
+ unsigned region;
/* keep going until we've finished */
if (bio->bi_size)
@@ -135,10 +136,17 @@ static int endio(struct bio *bio, unsigned int done, int error)
if (error && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
- dec_count(io, bio_get_region(bio), error);
+ /*
+ * The bio destructor in bio_put() may use the io object.
+ */
+ io = bio->bi_private;
+ region = bio_get_region(bio);
+
bio->bi_max_vecs++;
bio_put(bio);
+ dec_count(io, region, error);
+
return 0;
}
@@ -209,6 +217,9 @@ static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
dp->context_ptr = bvec;
}
+/*
+ * Functions for getting the pages from a VMA.
+ */
static void vm_get_page(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset)
{
@@ -233,7 +244,34 @@ static void vm_dp_init(struct dpages *dp, void *data)
static void dm_bio_destructor(struct bio *bio)
{
- bio_free(bio, _bios);
+ struct io *io = bio->bi_private;
+
+ bio_free(bio, io->client->bios);
+}
+
+/*
+ * Functions for getting the pages from kernel memory.
+ */
+static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
+ unsigned *offset)
+{
+ *p = virt_to_page(dp->context_ptr);
+ *offset = dp->context_u;
+ *len = PAGE_SIZE - dp->context_u;
+}
+
+static void km_next_page(struct dpages *dp)
+{
+ dp->context_ptr += PAGE_SIZE - dp->context_u;
+ dp->context_u = 0;
+}
+
+static void km_dp_init(struct dpages *dp, void *data)
+{
+ dp->get_page = km_get_page;
+ dp->next_page = km_next_page;
+ dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
+ dp->context_ptr = data;
}
/*-----------------------------------------------------------------
@@ -256,7 +294,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
* to hide it from bio_add_page().
*/
num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
+ bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
@@ -311,8 +349,9 @@ static void dispatch_io(int rw, unsigned int num_regions,
dec_count(io, 0, 0);
}
-static int sync_io(unsigned int num_regions, struct io_region *where,
- int rw, struct dpages *dp, unsigned long *error_bits)
+static int sync_io(struct dm_io_client *client, unsigned int num_regions,
+ struct io_region *where, int rw, struct dpages *dp,
+ unsigned long *error_bits)
{
struct io io;
@@ -324,6 +363,7 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
io.error = 0;
atomic_set(&io.count, 1); /* see dispatch_io() */
io.sleeper = current;
+ io.client = client;
dispatch_io(rw, num_regions, where, dp, &io, 1);
@@ -340,12 +380,15 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
if (atomic_read(&io.count))
return -EINTR;
- *error_bits = io.error;
+ if (error_bits)
+ *error_bits = io.error;
+
return io.error ? -EIO : 0;
}
-static int async_io(unsigned int num_regions, struct io_region *where, int rw,
- struct dpages *dp, io_notify_fn fn, void *context)
+static int async_io(struct dm_io_client *client, unsigned int num_regions,
+ struct io_region *where, int rw, struct dpages *dp,
+ io_notify_fn fn, void *context)
{
struct io *io;
@@ -355,10 +398,11 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
return -EIO;
}
- io = mempool_alloc(_io_pool, GFP_NOIO);
+ io = mempool_alloc(client->pool, GFP_NOIO);
io->error = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->sleeper = NULL;
+ io->client = client;
io->callback = fn;
io->context = context;
@@ -366,61 +410,51 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
return 0;
}
-int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
- struct page_list *pl, unsigned int offset,
- unsigned long *error_bits)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
- struct dpages dp;
- list_dp_init(&dp, pl, offset);
- return sync_io(num_regions, where, rw, &dp, error_bits);
-}
+ /* Set up dpages based on memory type */
+ switch (io_req->mem.type) {
+ case DM_IO_PAGE_LIST:
+ list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
+ break;
+
+ case DM_IO_BVEC:
+ bvec_dp_init(dp, io_req->mem.ptr.bvec);
+ break;
+
+ case DM_IO_VMA:
+ vm_dp_init(dp, io_req->mem.ptr.vma);
+ break;
+
+ case DM_IO_KMEM:
+ km_dp_init(dp, io_req->mem.ptr.addr);
+ break;
+
+ default:
+ return -EINVAL;
+ }
-int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
- struct bio_vec *bvec, unsigned long *error_bits)
-{
- struct dpages dp;
- bvec_dp_init(&dp, bvec);
- return sync_io(num_regions, where, rw, &dp, error_bits);
+ return 0;
}
-int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
- void *data, unsigned long *error_bits)
+/*
+ * New collapsed (a)synchronous interface
+ */
+int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+ struct io_region *where, unsigned long *sync_error_bits)
{
+ int r;
struct dpages dp;
- vm_dp_init(&dp, data);
- return sync_io(num_regions, where, rw, &dp, error_bits);
-}
-int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
- struct page_list *pl, unsigned int offset,
- io_notify_fn fn, void *context)
-{
- struct dpages dp;
- list_dp_init(&dp, pl, offset);
- return async_io(num_regions, where, rw, &dp, fn, context);
-}
+ r = dp_init(io_req, &dp);
+ if (r)
+ return r;
-int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
- struct bio_vec *bvec, io_notify_fn fn, void *context)
-{
- struct dpages dp;
- bvec_dp_init(&dp, bvec);
- return async_io(num_regions, where, rw, &dp, fn, context);
-}
+ if (!io_req->notify.fn)
+ return sync_io(io_req->client, num_regions, where,
+ io_req->bi_rw, &dp, sync_error_bits);
-int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
- void *data, io_notify_fn fn, void *context)
-{
- struct dpages dp;
- vm_dp_init(&dp, data);
- return async_io(num_regions, where, rw, &dp, fn, context);
+ return async_io(io_req->client, num_regions, where, io_req->bi_rw,
+ &dp, io_req->notify.fn, io_req->notify.context);
}
-
-EXPORT_SYMBOL(dm_io_get);
-EXPORT_SYMBOL(dm_io_put);
-EXPORT_SYMBOL(dm_io_sync);
-EXPORT_SYMBOL(dm_io_async);
-EXPORT_SYMBOL(dm_io_sync_bvec);
-EXPORT_SYMBOL(dm_io_async_bvec);
-EXPORT_SYMBOL(dm_io_sync_vm);
-EXPORT_SYMBOL(dm_io_async_vm);
+EXPORT_SYMBOL(dm_io);
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index f9035bfd1a9..f647e2cceaa 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -12,7 +12,7 @@
struct io_region {
struct block_device *bdev;
sector_t sector;
- sector_t count;
+ sector_t count; /* If this is zero the region is ignored. */
};
struct page_list {
@@ -20,55 +20,60 @@ struct page_list {
struct page *page;
};
-
-/*
- * 'error' is a bitset, with each bit indicating whether an error
- * occurred doing io to the corresponding region.
- */
typedef void (*io_notify_fn)(unsigned long error, void *context);
+enum dm_io_mem_type {
+ DM_IO_PAGE_LIST,/* Page list */
+ DM_IO_BVEC, /* Bio vector */
+ DM_IO_VMA, /* Virtual memory area */
+ DM_IO_KMEM, /* Kernel memory */
+};
+
+struct dm_io_memory {
+ enum dm_io_mem_type type;
+
+ union {
+ struct page_list *pl;
+ struct bio_vec *bvec;
+ void *vma;
+ void *addr;
+ } ptr;
+
+ unsigned offset;
+};
+
+struct dm_io_notify {
+ io_notify_fn fn; /* Callback for asynchronous requests */
+ void *context; /* Passed to callback */
+};
/*
- * Before anyone uses the IO interface they should call
- * dm_io_get(), specifying roughly how many pages they are
- * expecting to perform io on concurrently.
- *
- * This function may block.
+ * IO request structure
*/
-int dm_io_get(unsigned int num_pages);
-void dm_io_put(unsigned int num_pages);
+struct dm_io_client;
+struct dm_io_request {
+ int bi_rw; /* READ|WRITE - not READA */
+ struct dm_io_memory mem; /* Memory to use for io */
+ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
+ struct dm_io_client *client; /* Client memory handler */
+};
/*
- * Synchronous IO.
+ * Before issuing io with the dm_io() function below, callers should
+ * create a client with dm_io_client_create(), which sets up private
+ * mempools and a bioset sized for the io they expect to have in flight.
*
- * Please ensure that the rw flag in the next two functions is
- * either READ or WRITE, ie. we don't take READA. Any
- * regions with a zero count field will be ignored.
+ * Create/destroy may block.
*/
-int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
- struct page_list *pl, unsigned int offset,
- unsigned long *error_bits);
-
-int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
- struct bio_vec *bvec, unsigned long *error_bits);
-
-int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
- void *data, unsigned long *error_bits);
+struct dm_io_client *dm_io_client_create(unsigned num_pages);
+int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
+void dm_io_client_destroy(struct dm_io_client *client);
/*
- * Aynchronous IO.
- *
- * The 'where' array may be safely allocated on the stack since
- * the function takes a copy.
+ * IO interface using private per-client pools.
+ * Each bit in the optional 'sync_error_bits' bitset indicates whether an
+ * error occurred doing io to the corresponding region.
*/
-int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
- struct page_list *pl, unsigned int offset,
- io_notify_fn fn, void *context);
-
-int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
- struct bio_vec *bvec, io_notify_fn fn, void *context);
-
-int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
- void *data, io_notify_fn fn, void *context);
+int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+ struct io_region *region, unsigned long *sync_error_bits);
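+
+/*
+ * Minimal synchronous usage sketch (hypothetical caller; error handling
+ * elided; 'where' is a pre-filled struct io_region and 'data' a
+ * vmalloc'd buffer):
+ *
+ *   struct dm_io_client *client = dm_io_client_create(16);
+ *   struct dm_io_request io_req = {
+ *      .bi_rw = READ,
+ *      .mem.type = DM_IO_VMA,
+ *      .mem.ptr.vma = data,
+ *      .notify.fn = NULL,     (a NULL notify.fn selects the sync path)
+ *      .client = client,
+ *   };
+ *   int r = dm_io(&io_req, 1, &where, NULL);
+ *   dm_io_client_destroy(client);
+ */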
#endif
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6a926135184..a66428d860f 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -149,9 +149,12 @@ struct log_c {
FORCESYNC, /* Force a sync to happen */
} sync;
+ struct dm_io_request io_req;
+
/*
* Disk log fields
*/
+ int log_dev_failed;
struct dm_dev *log_dev;
struct log_header header;
@@ -199,13 +202,20 @@ static void header_from_disk(struct log_header *core, struct log_header *disk)
core->nr_regions = le64_to_cpu(disk->nr_regions);
}
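+
+/*
+ * Read or write the on-disk log header, reusing the dm_io_request that
+ * create_log_context() set up; only the transfer direction varies.
+ */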
+static int rw_header(struct log_c *lc, int rw)
+{
+ lc->io_req.bi_rw = rw;
+ lc->io_req.mem.ptr.vma = lc->disk_header;
+ lc->io_req.notify.fn = NULL;
+
+ return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
+}
+
static int read_header(struct log_c *log)
{
int r;
- unsigned long ebits;
- r = dm_io_sync_vm(1, &log->header_location, READ,
- log->disk_header, &ebits);
+ r = rw_header(log, READ);
if (r)
return r;
@@ -233,11 +243,8 @@ static int read_header(struct log_c *log)
static inline int write_header(struct log_c *log)
{
- unsigned long ebits;
-
header_to_disk(&log->header, log->disk_header);
- return dm_io_sync_vm(1, &log->header_location, WRITE,
- log->disk_header, &ebits);
+ return rw_header(log, WRITE);
}
/*----------------------------------------------------------------
@@ -256,6 +263,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
uint32_t region_size;
unsigned int region_count;
size_t bitset_size, buf_size;
+ int r;
if (argc < 1 || argc > 2) {
DMWARN("wrong number of arguments to mirror log");
@@ -315,6 +323,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
lc->disk_header = NULL;
} else {
lc->log_dev = dev;
+ lc->log_dev_failed = 0;
lc->header_location.bdev = lc->log_dev->bdev;
lc->header_location.sector = 0;
@@ -324,6 +333,15 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
bitset_size, ti->limits.hardsect_size);
lc->header_location.count = buf_size >> SECTOR_SHIFT;
+ lc->io_req.mem.type = DM_IO_VMA;
+ lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
+ PAGE_SIZE));
+ if (IS_ERR(lc->io_req.client)) {
+ r = PTR_ERR(lc->io_req.client);
+ DMWARN("couldn't allocate disk io client");
+ kfree(lc);
+ return r;
+ }
lc->disk_header = vmalloc(buf_size);
if (!lc->disk_header) {
@@ -424,6 +442,7 @@ static void disk_dtr(struct dirty_log *log)
dm_put_device(lc->ti, lc->log_dev);
vfree(lc->disk_header);
+ dm_io_client_destroy(lc->io_req.client);
destroy_log_context(lc);
}
@@ -437,6 +456,15 @@ static int count_bits32(uint32_t *addr, unsigned size)
return count;
}
+static void fail_log_device(struct log_c *lc)
+{
+ if (lc->log_dev_failed)
+ return;
+
+ lc->log_dev_failed = 1;
+ dm_table_event(lc->ti->table);
+}
+
static int disk_resume(struct dirty_log *log)
{
int r;
@@ -446,8 +474,19 @@ static int disk_resume(struct dirty_log *log)
/* read the disk header */
r = read_header(lc);
- if (r)
- return r;
+ if (r) {
+ DMWARN("%s: Failed to read header on mirror log device",
+ lc->log_dev->name);
+ fail_log_device(lc);
+ /*
+ * If the log device cannot be read, we must assume
+ * all regions are out-of-sync. If we simply return
+ * here, the state will be uninitialized and could
+ * lead us to return 'in-sync' status for regions
+ * that are actually 'out-of-sync'.
+ */
+ lc->header.nr_regions = 0;
+ }
/* set or clear any new bits -- device has grown */
if (lc->sync == NOSYNC)
@@ -472,7 +511,14 @@ static int disk_resume(struct dirty_log *log)
lc->header.nr_regions = lc->region_count;
/* write the new header */
- return write_header(lc);
+ r = write_header(lc);
+ if (r) {
+ DMWARN("%s: Failed to write header on mirror log device",
+ lc->log_dev->name);
+ fail_log_device(lc);
+ }
+
+ return r;
}
static uint32_t core_get_region_size(struct dirty_log *log)
@@ -516,7 +562,9 @@ static int disk_flush(struct dirty_log *log)
return 0;
r = write_header(lc);
- if (!r)
+ if (r)
+ fail_log_device(lc);
+ else
lc->touched = 0;
return r;
@@ -591,6 +639,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
switch(status) {
case STATUSTYPE_INFO:
+ DMEMIT("1 %s", log->type->name);
break;
case STATUSTYPE_TABLE:
@@ -606,17 +655,17 @@ static int disk_status(struct dirty_log *log, status_type_t status,
char *result, unsigned int maxlen)
{
int sz = 0;
- char buffer[16];
struct log_c *lc = log->context;
switch(status) {
case STATUSTYPE_INFO:
+ DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
+ lc->log_dev_failed ? 'D' : 'A');
break;
case STATUSTYPE_TABLE:
- format_dev_t(buffer, lc->log_dev->bdev->bd_dev);
DMEMIT("%s %u %s %u ", log->type->name,
- lc->sync == DEFAULTSYNC ? 2 : 3, buffer,
+ lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
lc->region_size);
DMEMIT_SYNC;
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3aa01350696..de54b39e6ff 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -668,6 +668,9 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
return -EINVAL;
}
+ m->hw_handler.md = dm_table_get_md(ti->table);
+ dm_put(m->hw_handler.md);
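+ /*
+ * The reference taken by dm_table_get_md() is dropped straight away;
+ * the pointer stays valid because the mapped_device outlives the
+ * table and the hw_handler bound to it.
+ */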
+
r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
if (r) {
dm_put_hw_handler(hwht);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 23a642619be..ef124b71ccc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -21,15 +21,11 @@
#include <linux/workqueue.h>
#define DM_MSG_PREFIX "raid1"
+#define DM_IO_PAGES 64
-static struct workqueue_struct *_kmirrord_wq;
-static struct work_struct _kmirrord_work;
-static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
+#define DM_RAID1_HANDLE_ERRORS 0x01
-static inline void wake(void)
-{
- queue_work(_kmirrord_wq, &_kmirrord_work);
-}
+static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
* Region hash
@@ -125,17 +121,23 @@ struct mirror_set {
struct list_head list;
struct region_hash rh;
struct kcopyd_client *kcopyd_client;
+ uint64_t features;
spinlock_t lock; /* protects the next two lists */
struct bio_list reads;
struct bio_list writes;
+ struct dm_io_client *io_client;
+
/* recovery */
region_t nr_regions;
int in_sync;
struct mirror *default_mirror; /* Default mirror */
+ struct workqueue_struct *kmirrord_wq;
+ struct work_struct kmirrord_work;
+
unsigned int nr_mirrors;
struct mirror mirror[0];
};
@@ -153,6 +155,11 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
return region << rh->region_shift;
}
+static void wake(struct mirror_set *ms)
+{
+ queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
+}
+
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
@@ -398,8 +405,7 @@ static void rh_update_states(struct region_hash *rh)
mempool_free(reg, rh->region_pool);
}
- if (!list_empty(&recovered))
- rh->log->type->flush(rh->log);
+ rh->log->type->flush(rh->log);
list_for_each_entry_safe (reg, next, &clean, list)
mempool_free(reg, rh->region_pool);
@@ -471,7 +477,7 @@ static void rh_dec(struct region_hash *rh, region_t region)
spin_unlock_irqrestore(&rh->region_lock, flags);
if (should_wake)
- wake();
+ wake(rh->ms);
}
/*
@@ -558,7 +564,7 @@ static void rh_recovery_end(struct region *reg, int success)
list_add(&reg->list, &reg->rh->recovered_regions);
spin_unlock_irq(&rh->region_lock);
- wake();
+ wake(rh->ms);
}
static void rh_flush(struct region_hash *rh)
@@ -592,7 +598,7 @@ static void rh_start_recovery(struct region_hash *rh)
for (i = 0; i < MAX_RECOVERY; i++)
up(&rh->recovery_count);
- wake();
+ wake(rh->ms);
}
/*
@@ -735,7 +741,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
/*
* We can only read balance if the region is in sync.
*/
- if (rh_in_sync(&ms->rh, region, 0))
+ if (rh_in_sync(&ms->rh, region, 1))
m = choose_mirror(ms, bio->bi_sector);
else
m = ms->default_mirror;
@@ -792,6 +798,14 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
unsigned int i;
struct io_region io[KCOPYD_MAX_REGIONS+1];
struct mirror *m;
+ struct dm_io_request io_req = {
+ .bi_rw = WRITE,
+ .mem.type = DM_IO_BVEC,
+ .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .notify.fn = write_callback,
+ .notify.context = bio,
+ .client = ms->io_client,
+ };
for (i = 0; i < ms->nr_mirrors; i++) {
m = ms->mirror + i;
@@ -802,9 +816,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
}
bio_set_ms(bio, ms);
- dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
- bio->bi_io_vec + bio->bi_idx,
- write_callback, bio);
+
+ (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -870,11 +883,10 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
/*-----------------------------------------------------------------
* kmirrord
*---------------------------------------------------------------*/
-static LIST_HEAD(_mirror_sets);
-static DECLARE_RWSEM(_mirror_sets_lock);
-
-static void do_mirror(struct mirror_set *ms)
+static void do_mirror(struct work_struct *work)
{
+ struct mirror_set *ms = container_of(work, struct mirror_set,
+ kmirrord_work);
struct bio_list reads, writes;
spin_lock(&ms->lock);
@@ -890,16 +902,6 @@ static void do_mirror(struct mirror_set *ms)
do_writes(ms, &writes);
}
-static void do_work(struct work_struct *ignored)
-{
- struct mirror_set *ms;
-
- down_read(&_mirror_sets_lock);
- list_for_each_entry (ms, &_mirror_sets, list)
- do_mirror(ms);
- up_read(&_mirror_sets_lock);
-}
-
/*-----------------------------------------------------------------
* Target functions
*---------------------------------------------------------------*/
@@ -931,6 +933,13 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
ms->in_sync = 0;
ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
+ ms->io_client = dm_io_client_create(DM_IO_PAGES);
+ if (IS_ERR(ms->io_client)) {
+ ti->error = "Error creating dm_io client";
+ kfree(ms);
+ return NULL;
+ }
+
if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
ti->error = "Error creating dirty region hash";
kfree(ms);
@@ -946,6 +955,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
while (m--)
dm_put_device(ti, ms->mirror[m].dev);
+ dm_io_client_destroy(ms->io_client);
rh_exit(&ms->rh);
kfree(ms);
}
@@ -978,23 +988,6 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
return 0;
}
-static int add_mirror_set(struct mirror_set *ms)
-{
- down_write(&_mirror_sets_lock);
- list_add_tail(&ms->list, &_mirror_sets);
- up_write(&_mirror_sets_lock);
- wake();
-
- return 0;
-}
-
-static void del_mirror_set(struct mirror_set *ms)
-{
- down_write(&_mirror_sets_lock);
- list_del(&ms->list);
- up_write(&_mirror_sets_lock);
-}
-
/*
* Create dirty log: log_type #log_params <log_params>
*/
@@ -1037,16 +1030,55 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
return dl;
}
+static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
+ unsigned *args_used)
+{
+ unsigned num_features;
+ struct dm_target *ti = ms->ti;
+
+ *args_used = 0;
+
+ if (!argc)
+ return 0;
+
+ if (sscanf(argv[0], "%u", &num_features) != 1) {
+ ti->error = "Invalid number of features";
+ return -EINVAL;
+ }
+
+ argc--;
+ argv++;
+ (*args_used)++;
+
+ if (num_features > argc) {
+ ti->error = "Not enough arguments to support feature count";
+ return -EINVAL;
+ }
+
+ if (!strcmp("handle_errors", argv[0]))
+ ms->features |= DM_RAID1_HANDLE_ERRORS;
+ else {
+ ti->error = "Unrecognised feature requested";
+ return -EINVAL;
+ }
+
+ (*args_used)++;
+
+ return 0;
+}
+
/*
* Construct a mirror mapping:
*
* log_type #log_params <log_params>
* #mirrors [mirror_path offset]{2,}
+ * [#features <features>]
*
* log_type is "core" or "disk"
* #log_params is between 1 and 3
+ *
+ * If present, features must be "handle_errors".
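+ *
+ * Example table line (hypothetical devices; sizes in 512-byte sectors):
+ *   0 2097152 mirror core 1 1024 2 /dev/sdb1 0 /dev/sdc1 0 1 handle_errors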
*/
-#define DM_IO_PAGES 64
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
@@ -1070,8 +1102,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv++, argc--;
- if (argc != nr_mirrors * 2) {
- ti->error = "Wrong number of mirror arguments";
+ if (argc < nr_mirrors * 2) {
+ ti->error = "Too few mirror arguments";
dm_destroy_dirty_log(dl);
return -EINVAL;
}
@@ -1096,13 +1128,37 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = ms;
ti->split_io = ms->rh.region_size;
+ ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
+ if (!ms->kmirrord_wq) {
+ DMERR("couldn't start kmirrord");
+ free_context(ms, ti, m);
+ return -ENOMEM;
+ }
+ INIT_WORK(&ms->kmirrord_work, do_mirror);
+
+ r = parse_features(ms, argc, argv, &args_used);
+ if (r) {
+ free_context(ms, ti, ms->nr_mirrors);
+ return r;
+ }
+
+ argv += args_used;
+ argc -= args_used;
+
+ if (argc) {
+ ti->error = "Too many mirror arguments";
+ free_context(ms, ti, ms->nr_mirrors);
+ return -EINVAL;
+ }
+
r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
if (r) {
+ destroy_workqueue(ms->kmirrord_wq);
free_context(ms, ti, ms->nr_mirrors);
return r;
}
- add_mirror_set(ms);
+ wake(ms);
return 0;
}
@@ -1110,8 +1166,9 @@ static void mirror_dtr(struct dm_target *ti)
{
struct mirror_set *ms = (struct mirror_set *) ti->private;
- del_mirror_set(ms);
+ flush_workqueue(ms->kmirrord_wq);
kcopyd_client_destroy(ms->kcopyd_client);
+ destroy_workqueue(ms->kmirrord_wq);
free_context(ms, ti, ms->nr_mirrors);
}
@@ -1127,7 +1184,7 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
spin_unlock(&ms->lock);
if (should_wake)
- wake();
+ wake(ms);
}
/*
@@ -1222,11 +1279,9 @@ static void mirror_resume(struct dm_target *ti)
static int mirror_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
- unsigned int m, sz;
+ unsigned int m, sz = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
- sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
-
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%d ", ms->nr_mirrors);
@@ -1237,13 +1292,21 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
(unsigned long long)ms->rh.log->type->
get_sync_count(ms->rh.log),
(unsigned long long)ms->nr_regions);
+
+ sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
+
break;
case STATUSTYPE_TABLE:
+ sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
+
DMEMIT("%d", ms->nr_mirrors);
for (m = 0; m < ms->nr_mirrors; m++)
DMEMIT(" %s %llu", ms->mirror[m].dev->name,
(unsigned long long)ms->mirror[m].offset);
+
+ if (ms->features & DM_RAID1_HANDLE_ERRORS)
+ DMEMIT(" 1 handle_errors");
}
return 0;
@@ -1251,7 +1314,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 0, 2},
+ .version = {1, 0, 3},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
@@ -1270,20 +1333,11 @@ static int __init dm_mirror_init(void)
if (r)
return r;
- _kmirrord_wq = create_singlethread_workqueue("kmirrord");
- if (!_kmirrord_wq) {
- DMERR("couldn't start kmirrord");
- dm_dirty_log_exit();
- return r;
- }
- INIT_WORK(&_kmirrord_work, do_work);
-
r = dm_register_target(&mirror_target);
if (r < 0) {
DMERR("%s: Failed to register mirror target",
mirror_target.name);
dm_dirty_log_exit();
- destroy_workqueue(_kmirrord_wq);
}
return r;
@@ -1297,7 +1351,6 @@ static void __exit dm_mirror_exit(void)
if (r < 0)
DMERR("%s: unregister failed %d", mirror_target.name, r);
- destroy_workqueue(_kmirrord_wq);
dm_dirty_log_exit();
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 05befa91807..2fc199b0016 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -425,13 +425,15 @@ static void close_dev(struct dm_dev *d, struct mapped_device *md)
}
/*
- * If possible (ie. blk_size[major] is set), this checks an area
- * of a destination device is valid.
+ * If possible, this checks an area of a destination device is valid.
*/
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
- sector_t dev_size;
- dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+
+ if (!dev_size)
+ return 1;
+
return ((start < dev_size) && (len <= (dev_size - start)));
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 11a98df298e..2717a355dc5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1236,6 +1236,7 @@ void dm_put(struct mapped_device *md)
free_dev(md);
}
}
+EXPORT_SYMBOL_GPL(dm_put);
/*
* Process the deferred bios
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index b46f6c575f7..dbc234e3c69 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2002 Sistina Software (UK) Limited.
+ * Copyright (C) 2006 Red Hat GmbH
*
* This file is released under the GPL.
*
@@ -45,6 +46,8 @@ struct kcopyd_client {
unsigned int nr_pages;
unsigned int nr_free_pages;
+ struct dm_io_client *io_client;
+
wait_queue_head_t destroyq;
atomic_t nr_jobs;
};
@@ -342,16 +345,20 @@ static void complete_io(unsigned long error, void *context)
static int run_io_job(struct kcopyd_job *job)
{
int r;
+ struct dm_io_request io_req = {
+ .bi_rw = job->rw,
+ .mem.type = DM_IO_PAGE_LIST,
+ .mem.ptr.pl = job->pages,
+ .mem.offset = job->offset,
+ .notify.fn = complete_io,
+ .notify.context = job,
+ .client = job->kc->io_client,
+ };
if (job->rw == READ)
- r = dm_io_async(1, &job->source, job->rw,
- job->pages,
- job->offset, complete_io, job);
-
+ r = dm_io(&io_req, 1, &job->source, NULL);
else
- r = dm_io_async(job->num_dests, job->dests, job->rw,
- job->pages,
- job->offset, complete_io, job);
+ r = dm_io(&io_req, job->num_dests, job->dests, NULL);
return r;
}
@@ -670,8 +677,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
return r;
}
- r = dm_io_get(nr_pages);
- if (r) {
+ kc->io_client = dm_io_client_create(nr_pages);
+ if (IS_ERR(kc->io_client)) {
+ r = PTR_ERR(kc->io_client);
client_free_pages(kc);
kfree(kc);
kcopyd_exit();
@@ -691,7 +699,7 @@ void kcopyd_client_destroy(struct kcopyd_client *kc)
/* Wait for completion of all jobs submitted by this client. */
wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
- dm_io_put(kc->nr_pages);
+ dm_io_client_destroy(kc->io_client);
client_free_pages(kc);
client_del(kc);
kfree(kc);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2b4315d7e5d..65814b0340c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -33,6 +33,7 @@
*/
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
@@ -273,6 +274,7 @@ static mddev_t * mddev_find(dev_t unit)
atomic_set(&new->active, 1);
spin_lock_init(&new->write_lock);
init_waitqueue_head(&new->sb_wait);
+ new->reshape_position = MaxSector;
new->queue = blk_alloc_queue(GFP_KERNEL);
if (!new->queue) {
@@ -589,14 +591,41 @@ abort:
return ret;
}
+
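+/*
+ * Fold a 32-bit sum into 16 bits: e.g. 0x12345678 -> 0x1234 + 0x5678 =
+ * 0x68ac. The second round absorbs any carry out of bit 16
+ * (0xffff + 0xffff = 0x1fffe -> 0xffff), making the result
+ * architecture-independent.
+ */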
+static u32 md_csum_fold(u32 csum)
+{
+ csum = (csum & 0xffff) + (csum >> 16);
+ return (csum & 0xffff) + (csum >> 16);
+}
+
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
+ u64 newcsum = 0;
+ u32 *sb32 = (u32*)sb;
+ int i;
unsigned int disk_csum, csum;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
- csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
+
+ for (i = 0; i < MD_SB_BYTES/4 ; i++)
+ newcsum += sb32[i];
+ csum = (newcsum & 0xffffffff) + (newcsum >> 32);
+
+#ifdef CONFIG_ALPHA
+ /* This used to use csum_partial, which was wrong for several
+ * reasons including that different results are returned on
+ * different architectures. It isn't critical that we get exactly
+ * the same return value as before (we always csum_fold before
+ * testing, and that removes any differences). However as we
+ * know that csum_partial always returned a 16bit value on
+ * alphas, do a fold to maximise conformity to previous behaviour.
+ */
+ sb->sb_csum = md_csum_fold(disk_csum);
+#else
sb->sb_csum = disk_csum;
+#endif
return csum;
}
@@ -684,7 +713,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
if (sb->raid_disks <= 0)
goto abort;
- if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
+ if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
b);
goto abort;
@@ -694,6 +723,17 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
rdev->data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
+ if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
+ if (sb->level != 1 && sb->level != 4
+ && sb->level != 5 && sb->level != 6
+ && sb->level != 10) {
+ /* FIXME use a better test */
+ printk(KERN_WARNING
+ "md: bitmaps not supported for this level.\n");
+ goto abort;
+ }
+ }
+
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
else
@@ -792,16 +832,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
- mddev->bitmap_file == NULL) {
- if (mddev->level != 1 && mddev->level != 4
- && mddev->level != 5 && mddev->level != 6
- && mddev->level != 10) {
- /* FIXME use a better test */
- printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
- return -EINVAL;
- }
+ mddev->bitmap_file == NULL)
mddev->bitmap_offset = mddev->default_bitmap_offset;
- }
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling */
@@ -1058,6 +1090,18 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
bdevname(rdev->bdev,b));
return -EINVAL;
}
+ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
+ if (sb->level != cpu_to_le32(1) &&
+ sb->level != cpu_to_le32(4) &&
+ sb->level != cpu_to_le32(5) &&
+ sb->level != cpu_to_le32(6) &&
+ sb->level != cpu_to_le32(10)) {
+ printk(KERN_WARNING
+ "md: bitmaps not supported for this level.\n");
+ return -EINVAL;
+ }
+ }
+
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
@@ -1141,14 +1185,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
- mddev->bitmap_file == NULL ) {
- if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
- && mddev->level != 10) {
- printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
- return -EINVAL;
- }
+ mddev->bitmap_file == NULL )
mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
- }
+
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
mddev->delta_disks = le32_to_cpu(sb->delta_disks);
@@ -2204,6 +2243,10 @@ static ssize_t
layout_show(mddev_t *mddev, char *page)
{
/* just a number, not meaningful for all levels */
+ if (mddev->reshape_position != MaxSector &&
+ mddev->layout != mddev->new_layout)
+ return sprintf(page, "%d (%d)\n",
+ mddev->new_layout, mddev->layout);
return sprintf(page, "%d\n", mddev->layout);
}
@@ -2212,13 +2255,16 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
- if (mddev->pers)
- return -EBUSY;
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
- mddev->layout = n;
+ if (mddev->pers)
+ return -EBUSY;
+ if (mddev->reshape_position != MaxSector)
+ mddev->new_layout = n;
+ else
+ mddev->layout = n;
return len;
}
static struct md_sysfs_entry md_layout =
@@ -2230,6 +2276,10 @@ raid_disks_show(mddev_t *mddev, char *page)
{
if (mddev->raid_disks == 0)
return 0;
+ if (mddev->reshape_position != MaxSector &&
+ mddev->delta_disks != 0)
+ return sprintf(page, "%d (%d)\n", mddev->raid_disks,
+ mddev->raid_disks - mddev->delta_disks);
return sprintf(page, "%d\n", mddev->raid_disks);
}
@@ -2247,7 +2297,11 @@ raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers)
rv = update_raid_disks(mddev, n);
- else
+ else if (mddev->reshape_position != MaxSector) {
+ int olddisks = mddev->raid_disks - mddev->delta_disks;
+ mddev->delta_disks = n - olddisks;
+ mddev->raid_disks = n;
+ } else
mddev->raid_disks = n;
return rv ? rv : len;
}
@@ -2257,6 +2311,10 @@ __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
+ if (mddev->reshape_position != MaxSector &&
+ mddev->chunk_size != mddev->new_chunk)
+ return sprintf(page, "%d (%d)\n", mddev->new_chunk,
+ mddev->chunk_size);
return sprintf(page, "%d\n", mddev->chunk_size);
}
@@ -2267,12 +2325,15 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
- if (mddev->pers)
- return -EBUSY;
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
- mddev->chunk_size = n;
+ if (mddev->pers)
+ return -EBUSY;
+ else if (mddev->reshape_position != MaxSector)
+ mddev->new_chunk = n;
+ else
+ mddev->chunk_size = n;
return len;
}
static struct md_sysfs_entry md_chunk_size =
@@ -2637,8 +2698,7 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
minor = simple_strtoul(buf, &e, 10);
if (e==buf || (*e && *e != '\n') )
return -EINVAL;
- if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
- super_types[major].name == NULL)
+ if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
return -ENOENT;
mddev->major_version = major;
mddev->minor_version = minor;
@@ -2859,6 +2919,37 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
+static ssize_t
+reshape_position_show(mddev_t *mddev, char *page)
+{
+ if (mddev->reshape_position != MaxSector)
+ return sprintf(page, "%llu\n",
+ (unsigned long long)mddev->reshape_position);
+ strcpy(page, "none\n");
+ return 5;
+}
+
+static ssize_t
+reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long long new = simple_strtoull(buf, &e, 10);
+ if (mddev->pers)
+ return -EBUSY;
+ if (buf == e || (*e && *e != '\n'))
+ return -EINVAL;
+ mddev->reshape_position = new;
+ mddev->delta_disks = 0;
+ mddev->new_level = mddev->level;
+ mddev->new_layout = mddev->layout;
+ mddev->new_chunk = mddev->chunk_size;
+ return len;
+}
+
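+/*
+ * Usage sketch (hypothetical, array not yet started):
+ *   echo 1024 > /sys/block/md0/md/reshape_position
+ * primes the array so that later writes to layout, chunk_size or
+ * raid_disks update the pending new_* values instead of the live ones.
+ */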
+static struct md_sysfs_entry md_reshape_position =
+__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
+ reshape_position_store);
+
static struct attribute *md_default_attrs[] = {
&md_level.attr,
@@ -2871,6 +2962,7 @@ static struct attribute *md_default_attrs[] = {
&md_new_device.attr,
&md_safe_delay.attr,
&md_array_state.attr,
+ &md_reshape_position.attr,
NULL,
};
@@ -3409,6 +3501,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
mddev->size = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
+ mddev->reshape_position = MaxSector;
} else if (mddev->pers)
printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -4019,7 +4112,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
if (info->raid_disks == 0) {
/* just setting version number for superblock loading */
if (info->major_version < 0 ||
- info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
+ info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
printk(KERN_INFO
@@ -4941,15 +5034,6 @@ static int md_seq_open(struct inode *inode, struct file *file)
return error;
}
-static int md_seq_release(struct inode *inode, struct file *file)
-{
- struct seq_file *m = file->private_data;
- struct mdstat_info *mi = m->private;
- m->private = NULL;
- kfree(mi);
- return seq_release(inode, file);
-}
-
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *m = filp->private_data;
@@ -4971,7 +5055,7 @@ static const struct file_operations md_seq_fops = {
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = md_seq_release,
+ .release = seq_release_private,
.poll = mdstat_poll,
};
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 97ee870b265..3a95cc5e029 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -271,21 +271,25 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
*/
update_head_pos(mirror, r1_bio);
- if (uptodate || (conf->raid_disks - conf->mddev->degraded) <= 1) {
- /*
- * Set R1BIO_Uptodate in our master bio, so that
- * we will return a good error code for to the higher
- * levels even if IO on some other mirrored buffer fails.
- *
- * The 'master' represents the composite IO operation to
- * user-side. So if something waits for IO, then it will
- * wait for the 'master' bio.
+ if (uptodate)
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ else {
+ /* If all other devices have failed, we want to return
+ * the error upwards rather than fail the last device.
+ * Here we redefine "uptodate" to mean "Don't want to retry"
*/
- if (uptodate)
- set_bit(R1BIO_Uptodate, &r1_bio->state);
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (r1_bio->mddev->degraded == conf->raid_disks ||
+ (r1_bio->mddev->degraded == conf->raid_disks-1 &&
+ !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+ uptodate = 1;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+ if (uptodate)
raid_end_bio_io(r1_bio);
- } else {
+ else {
/*
* oops, read error:
*/
@@ -992,13 +996,14 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded++;
+ set_bit(Faulty, &rdev->flags);
spin_unlock_irqrestore(&conf->device_lock, flags);
/*
* if recovery is running, make sure it aborts.
*/
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
- }
- set_bit(Faulty, &rdev->flags);
+ } else
+ set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
" Operation continuing on %d devices\n",
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8d59914f205..061375ee659 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -353,8 +353,8 @@ static int grow_stripes(raid5_conf_t *conf, int num)
struct kmem_cache *sc;
int devs = conf->raid_disks;
- sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
- sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
+ sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
+ sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 91d25798ae4..3a80e0cc736 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -3,6 +3,7 @@
#
menu "Multimedia devices"
+ depends on HAS_IOMEM
config VIDEO_DEV
tristate "Video For Linux"
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 68ed3a78808..9200a30dd1b 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -3,7 +3,7 @@
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
* see dvb-usb-init.c for copyright information.
*
- * This file contains functions for initializing the the input-device and for handling remote-control-queries.
+ * This file contains functions for initializing the input-device and for handling remote-control-queries.
*/
#include "dvb-usb-common.h"
#include <linux/usb/input.h>
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index f5d40aa3d27..f64546c6aeb 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -266,7 +266,7 @@ static int dib7000m_sad_calib(struct dib7000m_state *state)
{
/* internal */
-// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth
+// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
dib7000m_write_word(state, 929, (0 << 1) | (0 << 0));
dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 0349a4b5da3..aece458cfe1 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -223,7 +223,7 @@ static int dib7000p_set_bandwidth(struct dvb_frontend *demod, u8 BW_Idx)
static int dib7000p_sad_calib(struct dib7000p_state *state)
{
/* internal */
-// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth
+// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
dib7000p_write_word(state, 73, (0 << 1) | (0 << 0));
dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 110536843e8..e725f612a6b 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -1,6 +1,6 @@
/*
TDA10021 - Single Chip Cable Channel Receiver driver module
- used on the the Siemens DVB-C cards
+ used on the Siemens DVB-C cards
Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de>
Copyright (C) 2004 Markus Schulz <msc@antzsystem.de>
diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c
index 54d7b07571b..23fd0303c91 100644
--- a/drivers/media/dvb/frontends/ves1x93.c
+++ b/drivers/media/dvb/frontends/ves1x93.c
@@ -306,7 +306,7 @@ static int ves1x93_read_status(struct dvb_frontend* fe, fe_status_t* status)
* The ves1893 sometimes returns sync values that make no sense,
* because, e.g., the SIGNAL bit is 0, while some of the higher
* bits are 1 (and how can there be a CARRIER w/o a SIGNAL?).
- * Tests showed that the the VITERBI and SYNC bits are returned
+ * Tests showed that the VITERBI and SYNC bits are returned
* reliably, while the SIGNAL and CARRIER bits ar sometimes wrong.
* If such a case occurs, we read the value again, until we get a
* valid value.
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 563a8319e60..54ccc6e1f92 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -70,7 +70,7 @@ static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr,
ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
if (ret != 2 + len) {
- em28xx_warn("writting to i2c device failed (error=%i)\n", ret);
+ em28xx_warn("writing to i2c device failed (error=%i)\n", ret);
return -EIO;
}
for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index bec67609500..2c7b158ce7e 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1729,7 +1729,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
endpoint = &interface->cur_altsetting->endpoint[1].desc;
- /* check if the the device has the iso in endpoint at the correct place */
+ /* check if the device has the iso in endpoint at the correct place */
if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
USB_ENDPOINT_XFER_ISOC) {
em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n");
diff --git a/drivers/media/video/pwc/philips.txt b/drivers/media/video/pwc/philips.txt
index f5e84841031..f9f3584281d 100644
--- a/drivers/media/video/pwc/philips.txt
+++ b/drivers/media/video/pwc/philips.txt
@@ -54,9 +54,9 @@ fps
Specifies the desired framerate. Is an integer in the range of 4-30.
fbufs
- This paramter specifies the number of internal buffers to use for storing
+ This parameter specifies the number of internal buffers to use for storing
frames from the cam. This will help if the process that reads images from
- the cam is a bit slow or momentarely busy. However, on slow machines it
+ the cam is a bit slow or momentarily busy. However, on slow machines it
only introduces lag, so choose carefully. The default is 3, which is
reasonable. You can set it between 2 and 5.
@@ -209,7 +209,7 @@ trace
128 0x80 PWCX debugging Off
- For example, to trace the open() & read() fuctions, sum 8 + 4 = 12,
+ For example, to trace the open() & read() functions, sum 8 + 4 = 12,
so you would supply trace=12 during insmod or modprobe. If
you want to turn the initialization and probing tracing off, set trace=0.
The default value for trace is 35 (0x23).
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 876fd276824..982b115193f 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -28,7 +28,7 @@
*
* Portions of this code were also copied from usbvideo.c
*
- * Special thanks to the the whole team at Sourceforge for help making
+ * Special thanks to the whole team at Sourceforge for help making
* this driver become a reality. Notably:
* Andy Armstrong who reverse engineered the color encoding and
* Pavel Machek and Chris Cheney who worked on reverse engineering the
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index 71037f91c22..c88cc75ab49 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -1,5 +1,6 @@
menu "Fusion MPT device support"
+ depends on PCI
config FUSION
bool
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index d6b4c607453..ddc7ae029dd 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -571,7 +571,7 @@ mpi_fc.h
* 11-02-00 01.01.01 Original release for post 1.0 work
* 12-04-00 01.01.02 Added messages for Common Transport Send and
* Primitive Send.
- * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix
+ * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
* and modified the FcPrimitiveSend flags.
* 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
* field.
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 97471af4309..5021d1a2a1d 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -3585,7 +3585,7 @@ initChainBuffers(MPT_ADAPTER *ioc)
* index = chain_idx
*
 * Calculate the number of chain buffers needed (plus 1) per I/O
- * then multiply the the maximum number of simultaneous cmds
+ * then multiply the maximum number of simultaneous cmds
*
* num_sge = num sge in request frame + last chain buffer
* scale = num sge per chain buffer if no chain element
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 6443392bfff..f4ac21e5771 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -1,5 +1,6 @@
menu "I2O device support"
+ depends on PCI
config I2O
tristate "I2O support"
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ab6e985275b..a20a51efe11 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -3,6 +3,7 @@
#
menu "Multifunction device drivers"
+ depends on HAS_IOMEM
config MFD_SM501
tristate "Support for Silicon Motion SM501"
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 877e7909a0e..2f2fbffafbe 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -130,7 +130,7 @@ config SONY_LAPTOP
Read <file:Documentation/sony-laptop.txt> for more information.
-config SONY_LAPTOP_OLD
+config SONYPI_COMPAT
bool "Sonypi compatibility"
depends on SONY_LAPTOP
---help---
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 65c32a95e12..4f9060a2a2f 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -30,7 +30,7 @@
* Eric Burghard - LED display support for W1N
* Josh Green - Light Sens support
 * Thomas Tuttle - His first patch for led support was very helpful
- *
+ * Sam Lin - GPS support
*/
#include <linux/autoconf.h>
@@ -48,7 +48,7 @@
#include <acpi/acpi_bus.h>
#include <asm/uaccess.h>
-#define ASUS_LAPTOP_VERSION "0.41"
+#define ASUS_LAPTOP_VERSION "0.42"
#define ASUS_HOTK_NAME "Asus Laptop Support"
#define ASUS_HOTK_CLASS "hotkey"
@@ -83,6 +83,7 @@
#define PLED_ON 0x20 //Phone LED
#define GLED_ON 0x40 //Gaming LED
#define LCD_ON 0x80 //LCD backlight
+#define GPS_ON 0x100 //GPS
#define ASUS_LOG ASUS_HOTK_FILE ": "
#define ASUS_ERR KERN_ERR ASUS_LOG
@@ -148,7 +149,7 @@ ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G
M6A M6V VX-1 V6J V6V W3Z */
"\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V
- S5A M5A z33A W1Jc W2V */
+ S5A M5A z33A W1Jc W2V G1 */
"\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */
"\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */
"\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */
@@ -162,6 +163,12 @@ ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L
ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
+/* GPS */
+/* R2H use different handle for GPS on/off */
+ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */
+ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */
+ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST");
+
/*
* This is the main structure, we can use it to store anything interesting
* about the hotk device
@@ -278,12 +285,28 @@ static int read_wireless_status(int mask)
return (hotk->status & mask) ? 1 : 0;
}
+static int read_gps_status(void)
+{
+ ulong status;
+ acpi_status rv = AE_OK;
+
+ rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
+ if (ACPI_FAILURE(rv))
+ printk(ASUS_WARNING "Error reading GPS status\n");
+ else
+ return status ? 1 : 0;
+
+ return (hotk->status & GPS_ON) ? 1 : 0;
+}
+
/* Generic LED functions */
static int read_status(int mask)
{
/* There is a special method for both wireless devices */
if (mask == BT_ON || mask == WL_ON)
return read_wireless_status(mask);
+ else if (mask == GPS_ON)
+ return read_gps_status();
return (hotk->status & mask) ? 1 : 0;
}
@@ -299,6 +322,10 @@ static void write_status(acpi_handle handle, int out, int mask)
case GLED_ON:
out = (out & 0x1) + 1;
break;
+ case GPS_ON:
+ handle = (out) ? gps_on_handle : gps_off_handle;
+ out = 0x02;
+ break;
default:
out &= 0x1;
break;
@@ -667,6 +694,21 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
return rv;
}
+/*
+ * GPS
+ */
+static ssize_t show_gps(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", read_status(GPS_ON));
+}
+
+static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return store_status(buf, count, NULL, GPS_ON);
+}
+
static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
{
/* TODO Find a better way to handle events count. */
@@ -715,6 +757,7 @@ static ASUS_CREATE_DEVICE_ATTR(display);
static ASUS_CREATE_DEVICE_ATTR(ledd);
static ASUS_CREATE_DEVICE_ATTR(ls_switch);
static ASUS_CREATE_DEVICE_ATTR(ls_level);
+static ASUS_CREATE_DEVICE_ATTR(gps);
static struct attribute *asuspf_attributes[] = {
&dev_attr_infos.attr,
@@ -724,6 +767,7 @@ static struct attribute *asuspf_attributes[] = {
&dev_attr_ledd.attr,
&dev_attr_ls_switch.attr,
&dev_attr_ls_level.attr,
+ &dev_attr_gps.attr,
NULL
};
@@ -763,6 +807,9 @@ static void asus_hotk_add_fs(void)
ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
}
+
+ if (gps_status_handle && gps_on_handle && gps_off_handle)
+ ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps);
}
static int asus_handle_init(char *name, acpi_handle * handle,
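
The ASUS_CREATE_DEVICE_ATTR()/ASUS_SET_DEVICE_ATTR() pair used for the new gps file wraps the stock sysfs machinery so attributes can be filled in at runtime, conditional on the ACPI handles being found. For reference, the open-coded equivalent with the standard kernel macro would be roughly this (a sketch, not the driver's code):

/* Static-declaration equivalent of the runtime ASUS_SET_DEVICE_ATTR()
 * call above; this yields dev_attr_gps for the attribute array. */
static DEVICE_ATTR(gps, 0644, show_gps, store_gps);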
@@ -890,9 +937,13 @@ static int asus_hotk_get_info(void)
	/* There are a lot of models with "ALSL", but only a few have
	   a real light sensor, so we need to check it. */
- if (ASUS_HANDLE_INIT(ls_switch))
+ if (!ASUS_HANDLE_INIT(ls_switch))
ASUS_HANDLE_INIT(ls_level);
+ ASUS_HANDLE_INIT(gps_on);
+ ASUS_HANDLE_INIT(gps_off);
+ ASUS_HANDLE_INIT(gps_status);
+
kfree(model);
return AE_OK;
@@ -950,7 +1001,7 @@ static int asus_hotk_add(struct acpi_device *device)
 * We install the handler; it will receive the hotk as a parameter, so we
 * could add other data to the hotk struct
*/
- status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
+ status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
asus_hotk_notify, hotk);
if (ACPI_FAILURE(status))
printk(ASUS_ERR "Error installing notify handler\n");
@@ -981,6 +1032,9 @@ static int asus_hotk_add(struct acpi_device *device)
if (ls_level_handle)
set_light_sens_level(hotk->light_level);
+ /* GPS is on by default */
+ write_status(NULL, 1, GPS_ON);
+
end:
if (result) {
kfree(hotk->name);
@@ -997,7 +1051,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
+ status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
asus_hotk_notify);
if (ACPI_FAILURE(status))
printk(ASUS_ERR "Error removing notify handler\n");
diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c
index 68c4b58525b..41e901f53e7 100644
--- a/drivers/misc/msi-laptop.c
+++ b/drivers/misc/msi-laptop.c
@@ -85,7 +85,7 @@ static int set_lcd_level(int level)
buf[0] = 0x80;
buf[1] = (u8) (level*31);
- return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0);
+ return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0, 1);
}
static int get_lcd_level(void)
@@ -93,7 +93,7 @@ static int get_lcd_level(void)
u8 wdata = 0, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1);
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
if (result < 0)
return result;
@@ -105,7 +105,7 @@ static int get_auto_brightness(void)
u8 wdata = 4, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1);
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
if (result < 0)
return result;
@@ -119,14 +119,14 @@ static int set_auto_brightness(int enable)
wdata[0] = 4;
- result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1);
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1, 1);
if (result < 0)
return result;
wdata[0] = 0x84;
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
- return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0);
+ return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0, 1);
}
static int get_wireless_state(int *wlan, int *bluetooth)
@@ -134,7 +134,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
u8 wdata = 0, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
+ result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1);
if (result < 0)
return -1;
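
Every ec_transaction() call in this file gains a trailing argument, always 1 at these call sites; the parameter's declaration and meaning are not visible in this diff, so they are left unstated here. When a helper grows an argument like this, one conventional way to keep old callers compiling is a thin wrapper; a hypothetical sketch (the prototype is inferred from the call sites, not quoted from the patched header):

/* Assumed prototype, inferred from the calls above. */
int ec_transaction(u8 command, const u8 *wdata, int wdata_length,
		   u8 *rdata, int rdata_length, int flag);

/* Hypothetical wrapper preserving the old five-argument shape,
 * defaulting the new flag to 1 as every call site here does. */
static inline int ec_transaction_compat(u8 command, const u8 *wdata,
					int wdata_length, u8 *rdata,
					int rdata_length)
{
	return ec_transaction(command, wdata, wdata_length,
			      rdata, rdata_length, 1);
}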
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index c15c1f61bd1..8ee0321ef1c 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -63,7 +63,7 @@
#include <asm/uaccess.h>
#include <linux/sonypi.h>
#include <linux/sony-laptop.h>
-#ifdef CONFIG_SONY_LAPTOP_OLD
+#ifdef CONFIG_SONYPI_COMPAT
#include <linux/poll.h>
#include <linux/miscdevice.h>
#endif
@@ -114,7 +114,7 @@ MODULE_PARM_DESC(camera,
"set this to 1 to enable Motion Eye camera controls "
"(only use it if you have a C1VE or C1VN model)");
-#ifdef CONFIG_SONY_LAPTOP_OLD
+#ifdef CONFIG_SONYPI_COMPAT
static int minor = -1;
module_param(minor, int, 0);
MODULE_PARM_DESC(minor,
@@ -1504,7 +1504,7 @@ static struct attribute_group spic_attribute_group = {
};
/******** SONYPI compatibility **********/
-#ifdef CONFIG_SONY_LAPTOP_OLD
+#ifdef CONFIG_SONYPI_COMPAT
/* battery / brightness / temperature addresses */
#define SONYPI_BAT_FLAGS 0x81
@@ -1798,7 +1798,7 @@ static void sonypi_compat_exit(void)
static int sonypi_compat_init(void) { return 0; }
static void sonypi_compat_exit(void) { }
static void sonypi_compat_report_event(u8 event) { }
-#endif /* CONFIG_SONY_LAPTOP_OLD */
+#endif /* CONFIG_SONYPI_COMPAT */
/*
* ACPI callbacks
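
The CONFIG_SONY_LAPTOP_OLD to CONFIG_SONYPI_COMPAT rename is mechanical, but the hunks also show an idiom worth noting: when the option is off, the compat entry points collapse to no-op stubs, so callers never need #ifdefs of their own. In outline, condensed from the hunks above:

#ifdef CONFIG_SONYPI_COMPAT
static int sonypi_compat_init(void);		/* real implementations */
static void sonypi_compat_exit(void);
static void sonypi_compat_report_event(u8 event);
#else
/* no-op stubs keep the call sites free of #ifdef clutter */
static int sonypi_compat_init(void) { return 0; }
static void sonypi_compat_exit(void) { }
static void sonypi_compat_report_event(u8 event) { }
#endif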
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 45b7d53b949..c0b41e8bcd9 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -4,6 +4,7 @@
menuconfig MMC
tristate "MMC/SD card support"
+ depends on HAS_IOMEM
help
MMC is the "multi-media card" bus protocol.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b6c16704aaa..7385acfa1dd 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -501,9 +501,9 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
unsigned long flags;
- spin_lock_irqsave(host->lock, flags);
+ spin_lock_irqsave(&host->lock, flags);
BUG_ON(host->removed);
- spin_unlock_irqrestore(host->lock, flags);
+ spin_unlock_irqrestore(&host->lock, flags);
#endif
mmc_schedule_delayed_work(&host->detect, delay);
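
The mmc_detect_change() hunk is a correctness fix rather than a cleanup: spin_lock_irqsave() and spin_unlock_irqrestore() take a pointer to the spinlock, so a lock embedded in struct mmc_host must be passed by address. Because the block is guarded by CONFIG_MMC_DEBUG, the old form only affected debug builds. A minimal sketch of the corrected pattern:

#include <linux/kernel.h>
#include <linux/spinlock.h>

struct demo_host {
	spinlock_t lock;
	int removed;
};

static void demo_detect(struct demo_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);	/* &host->lock, not host->lock */
	BUG_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
}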
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index c1b47db29bd..fbec8cd55e3 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -2,6 +2,7 @@
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
+ depends on HAS_IOMEM
help
Memory Technology Devices are flash, RAM and similar chips, often
used for solid state file systems on embedded devices. This option
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index d28e0fc85e1..479d32b57a1 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -1,5 +1,4 @@
# drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.18 2005/11/07 11:14:22 gleixner Exp $
menu "RAM/ROM/Flash chip drivers"
depends on MTD!=n
@@ -231,45 +230,6 @@ config MTD_ABSENT
the system regardless of media presence. Device nodes created
with this driver will return -ENODEV upon access.
-config MTD_OBSOLETE_CHIPS
- bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
- help
- This option does not enable any code directly, but will allow you to
- select some other chip drivers which are now considered obsolete,
- because the generic CONFIG_JEDECPROBE code above should now detect
- the chips which are supported by these drivers, and allow the generic
- CFI-compatible drivers to drive the chips. Say 'N' here unless you have
- already tried the CONFIG_JEDECPROBE method and reported its failure
- to the MTD mailing list at <linux-mtd@lists.infradead.org>
-
-config MTD_AMDSTD
- tristate "AMD compatible flash chip support (non-CFI)"
- depends on MTD_OBSOLETE_CHIPS && BROKEN
- help
- This option enables support for flash chips using AMD-compatible
- commands, including some which are not CFI-compatible and hence
- cannot be used with the CONFIG_MTD_CFI_AMDSTD option.
-
- It also works on AMD compatible chips that do conform to CFI.
-
-config MTD_SHARP
- tristate "pre-CFI Sharp chip support"
- depends on MTD_OBSOLETE_CHIPS
- help
- This option enables support for flash chips using Sharp-compatible
- commands, including some which are not CFI-compatible and hence
- cannot be used with the CONFIG_MTD_CFI_INTELxxx options.
-
-config MTD_JEDEC
- tristate "JEDEC device support"
- depends on MTD_OBSOLETE_CHIPS && BROKEN
- help
- Enable older JEDEC flash interface devices for self
- programming flash. It is commonly used in older AMD chips. It is
- only called JEDEC because the JEDEC association
- <http://www.jedec.org/> distributes the identification codes for the
- chips.
-
config MTD_XIP
bool "XIP aware MTD support"
depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL && ARCH_MTD_XIP
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 75bc1c2a0f4..36582412ccd 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -1,19 +1,15 @@
#
# linux/drivers/chips/Makefile
#
-# $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $
obj-$(CONFIG_MTD) += chipreg.o
-obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
obj-$(CONFIG_MTD_CFI) += cfi_probe.o
obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
-obj-$(CONFIG_MTD_JEDEC) += jedec.o
obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
obj-$(CONFIG_MTD_RAM) += map_ram.o
obj-$(CONFIG_MTD_ROM) += map_rom.o
-obj-$(CONFIG_MTD_SHARP) += sharp.o
obj-$(CONFIG_MTD_ABSENT) += map_absent.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
deleted file mode 100644
index e7999f15d85..00000000000
--- a/drivers/mtd/chips/amd_flash.c
+++ /dev/null
@@ -1,1396 +0,0 @@
-/*
- * MTD map driver for AMD compatible flash chips (non-CFI)
- *
- * Author: Jonas Holmberg <jonas.holmberg@axis.com>
- *
- * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
- *
- * Copyright (c) 2001 Axis Communications AB
- *
- * This file is under GPL.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/flashchip.h>
-
-/* There's no limit. It exists only to avoid realloc. */
-#define MAX_AMD_CHIPS 8
-
-#define DEVICE_TYPE_X8 (8 / 8)
-#define DEVICE_TYPE_X16 (16 / 8)
-#define DEVICE_TYPE_X32 (32 / 8)
-
-/* Addresses */
-#define ADDR_MANUFACTURER 0x0000
-#define ADDR_DEVICE_ID 0x0001
-#define ADDR_SECTOR_LOCK 0x0002
-#define ADDR_HANDSHAKE 0x0003
-#define ADDR_UNLOCK_1 0x0555
-#define ADDR_UNLOCK_2 0x02AA
-
-/* Commands */
-#define CMD_UNLOCK_DATA_1 0x00AA
-#define CMD_UNLOCK_DATA_2 0x0055
-#define CMD_MANUFACTURER_UNLOCK_DATA 0x0090
-#define CMD_UNLOCK_BYPASS_MODE 0x0020
-#define CMD_PROGRAM_UNLOCK_DATA 0x00A0
-#define CMD_RESET_DATA 0x00F0
-#define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
-#define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030
-
-#define CMD_UNLOCK_SECTOR 0x0060
-
-/* Manufacturers */
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001F
-#define MANUFACTURER_FUJITSU 0x0004
-#define MANUFACTURER_ST 0x0020
-#define MANUFACTURER_SST 0x00BF
-#define MANUFACTURER_TOSHIBA 0x0098
-
-/* AMD */
-#define AM29F800BB 0x2258
-#define AM29F800BT 0x22D6
-#define AM29LV800BB 0x225B
-#define AM29LV800BT 0x22DA
-#define AM29LV160DT 0x22C4
-#define AM29LV160DB 0x2249
-#define AM29BDS323D 0x22D1
-
-/* Atmel */
-#define AT49xV16x 0x00C0
-#define AT49xV16xT 0x00C2
-
-/* Fujitsu */
-#define MBM29LV160TE 0x22C4
-#define MBM29LV160BE 0x2249
-#define MBM29LV800BB 0x225B
-
-/* ST - www.st.com */
-#define M29W800T 0x00D7
-#define M29W160DT 0x22C4
-#define M29W160DB 0x2249
-
-/* SST */
-#define SST39LF800 0x2781
-#define SST39LF160 0x2782
-
-/* Toshiba */
-#define TC58FVT160 0x00C2
-#define TC58FVB160 0x0043
-
-#define D6_MASK 0x40
-
-struct amd_flash_private {
- int device_type;
- int interleave;
- int numchips;
- unsigned long chipshift;
- struct flchip chips[0];
-};
-
-struct amd_flash_info {
- const __u16 mfr_id;
- const __u16 dev_id;
- const char *name;
- const u_long size;
- const int numeraseregions;
- const struct mtd_erase_region_info regions[4];
-};
-
-
-
-static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
- u_char *);
-static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
- const u_char *);
-static int amd_flash_erase(struct mtd_info *, struct erase_info *);
-static void amd_flash_sync(struct mtd_info *);
-static int amd_flash_suspend(struct mtd_info *);
-static void amd_flash_resume(struct mtd_info *);
-static void amd_flash_destroy(struct mtd_info *);
-static struct mtd_info *amd_flash_probe(struct map_info *map);
-
-
-static struct mtd_chip_driver amd_flash_chipdrv = {
- .probe = amd_flash_probe,
- .destroy = amd_flash_destroy,
- .name = "amd_flash",
- .module = THIS_MODULE
-};
-
-static inline __u32 wide_read(struct map_info *map, __u32 addr)
-{
- if (map->buswidth == 1) {
- return map_read8(map, addr);
- } else if (map->buswidth == 2) {
- return map_read16(map, addr);
- } else if (map->buswidth == 4) {
- return map_read32(map, addr);
- }
-
- return 0;
-}
-
-static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
-{
- if (map->buswidth == 1) {
- map_write8(map, val, addr);
- } else if (map->buswidth == 2) {
- map_write16(map, val, addr);
- } else if (map->buswidth == 4) {
- map_write32(map, val, addr);
- }
-}
-
-static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
-{
- const struct amd_flash_private *private = map->fldrv_priv;
- if ((private->interleave == 2) &&
- (private->device_type == DEVICE_TYPE_X16)) {
- cmd |= (cmd << 16);
- }
-
- return cmd;
-}
-
-static inline void send_unlock(struct map_info *map, unsigned long base)
-{
- wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
- base + (map->buswidth * ADDR_UNLOCK_1));
- wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
- base + (map->buswidth * ADDR_UNLOCK_2));
-}
-
-static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
-{
- send_unlock(map, base);
- wide_write(map, make_cmd(map, cmd),
- base + (map->buswidth * ADDR_UNLOCK_1));
-}
-
-static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
- __u32 cmd, unsigned long addr)
-{
- send_unlock(map, base);
- wide_write(map, make_cmd(map, cmd), addr);
-}
-
-static inline int flash_is_busy(struct map_info *map, unsigned long addr,
- int interleave)
-{
-
- if ((interleave == 2) && (map->buswidth == 4)) {
- __u32 read1, read2;
-
- read1 = wide_read(map, addr);
- read2 = wide_read(map, addr);
-
- return (((read1 >> 16) & D6_MASK) !=
- ((read2 >> 16) & D6_MASK)) ||
- (((read1 & 0xffff) & D6_MASK) !=
- ((read2 & 0xffff) & D6_MASK));
- }
-
- return ((wide_read(map, addr) & D6_MASK) !=
- (wide_read(map, addr) & D6_MASK));
-}
-
-static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
- int unlock)
-{
- /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
- int SLA = unlock ?
- (sect_addr | (0x40 * map->buswidth)) :
- (sect_addr & ~(0x40 * map->buswidth)) ;
-
- __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
-
- wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
- wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
- wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
- wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
-}
-
-static inline int is_sector_locked(struct map_info *map,
- unsigned long sect_addr)
-{
- int status;
-
- wide_write(map, CMD_RESET_DATA, 0);
- send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
-
- /* status is 0x0000 for unlocked and 0x0001 for locked */
- status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
- wide_write(map, CMD_RESET_DATA, 0);
- return status;
-}
-
-static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
- int is_unlock)
-{
- struct map_info *map;
- struct mtd_erase_region_info *merip;
- int eraseoffset, erasesize, eraseblocks;
- int i;
- int retval = 0;
- int lock_status;
-
- map = mtd->priv;
-
- /* Pass the whole chip through sector by sector and check for each
- sector if the sector and the given interval overlap */
- for(i = 0; i < mtd->numeraseregions; i++) {
- merip = &mtd->eraseregions[i];
-
- eraseoffset = merip->offset;
- erasesize = merip->erasesize;
- eraseblocks = merip->numblocks;
-
- if (ofs > eraseoffset + erasesize)
- continue;
-
- while (eraseblocks > 0) {
- if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
- unlock_sector(map, eraseoffset, is_unlock);
-
- lock_status = is_sector_locked(map, eraseoffset);
-
- if (is_unlock && lock_status) {
- printk("Cannot unlock sector at address %x length %xx\n",
- eraseoffset, merip->erasesize);
- retval = -1;
- } else if (!is_unlock && !lock_status) {
- printk("Cannot lock sector at address %x length %x\n",
- eraseoffset, merip->erasesize);
- retval = -1;
- }
- }
- eraseoffset += erasesize;
- eraseblocks --;
- }
- }
- return retval;
-}
-
-static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- return amd_flash_do_unlock(mtd, ofs, len, 1);
-}
-
-static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
-{
- return amd_flash_do_unlock(mtd, ofs, len, 0);
-}
-
-
-/*
- * Reads JEDEC manufacturer ID and device ID and returns the index of the first
- * matching table entry (-1 if not found or alias for already found chip).
- */
-static int probe_new_chip(struct mtd_info *mtd, __u32 base,
- struct flchip *chips,
- struct amd_flash_private *private,
- const struct amd_flash_info *table, int table_size)
-{
- __u32 mfr_id;
- __u32 dev_id;
- struct map_info *map = mtd->priv;
- struct amd_flash_private temp;
- int i;
-
- temp.device_type = DEVICE_TYPE_X16; // Assume X16 (FIXME)
- temp.interleave = 2;
- map->fldrv_priv = &temp;
-
- /* Enter autoselect mode. */
- send_cmd(map, base, CMD_RESET_DATA);
- send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);
-
- mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
- dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));
-
- if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
- ((dev_id >> 16) == (dev_id & 0xffff))) {
- mfr_id &= 0xffff;
- dev_id &= 0xffff;
- } else {
- temp.interleave = 1;
- }
-
- for (i = 0; i < table_size; i++) {
- if ((mfr_id == table[i].mfr_id) &&
- (dev_id == table[i].dev_id)) {
- if (chips) {
- int j;
-
- /* Is this an alias for an already found chip?
- * In that case that chip should be in
- * autoselect mode now.
- */
- for (j = 0; j < private->numchips; j++) {
- __u32 mfr_id_other;
- __u32 dev_id_other;
-
- mfr_id_other =
- wide_read(map, chips[j].start +
- (map->buswidth *
- ADDR_MANUFACTURER
- ));
- dev_id_other =
- wide_read(map, chips[j].start +
- (map->buswidth *
- ADDR_DEVICE_ID));
- if (temp.interleave == 2) {
- mfr_id_other &= 0xffff;
- dev_id_other &= 0xffff;
- }
- if ((mfr_id_other == mfr_id) &&
- (dev_id_other == dev_id)) {
-
- /* Exit autoselect mode. */
- send_cmd(map, base,
- CMD_RESET_DATA);
-
- return -1;
- }
- }
-
- if (private->numchips == MAX_AMD_CHIPS) {
- printk(KERN_WARNING
- "%s: Too many flash chips "
- "detected. Increase "
- "MAX_AMD_CHIPS from %d.\n",
- map->name, MAX_AMD_CHIPS);
-
- return -1;
- }
-
- chips[private->numchips].start = base;
- chips[private->numchips].state = FL_READY;
- chips[private->numchips].mutex =
- &chips[private->numchips]._spinlock;
- private->numchips++;
- }
-
- printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
- temp.interleave, (table[i].size)/(1024*1024),
- table[i].name, base);
-
- mtd->size += table[i].size * temp.interleave;
- mtd->numeraseregions += table[i].numeraseregions;
-
- break;
- }
- }
-
- /* Exit autoselect mode. */
- send_cmd(map, base, CMD_RESET_DATA);
-
- if (i == table_size) {
- printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
- "mfr id 0x%x, dev id 0x%x\n", map->name,
- base, mfr_id, dev_id);
- map->fldrv_priv = NULL;
-
- return -1;
- }
-
- private->device_type = temp.device_type;
- private->interleave = temp.interleave;
-
- return i;
-}
-
-
-
-static struct mtd_info *amd_flash_probe(struct map_info *map)
-{
- static const struct amd_flash_info table[] = {
- {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29LV160DT,
- .name = "AMD AM29LV160DT",
- .size = 0x00200000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
- { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29LV160DB,
- .name = "AMD AM29LV160DB",
- .size = 0x00200000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
- { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
- }
- }, {
- .mfr_id = MANUFACTURER_TOSHIBA,
- .dev_id = TC58FVT160,
- .name = "Toshiba TC58FVT160",
- .size = 0x00200000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
- { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
- }
- }, {
- .mfr_id = MANUFACTURER_FUJITSU,
- .dev_id = MBM29LV160TE,
- .name = "Fujitsu MBM29LV160TE",
- .size = 0x00200000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
- { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
- }
- }, {
- .mfr_id = MANUFACTURER_TOSHIBA,
- .dev_id = TC58FVB160,
- .name = "Toshiba TC58FVB160",
- .size = 0x00200000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
- { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
- }
- }, {
- .mfr_id = MANUFACTURER_FUJITSU,
- .dev_id = MBM29LV160BE,
- .name = "Fujitsu MBM29LV160BE",
- .size = 0x00200000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
- { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29LV800BB,
- .name = "AMD AM29LV800BB",
- .size = 0x00100000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
- { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29F800BB,
- .name = "AMD AM29F800BB",
- .size = 0x00100000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
- { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29LV800BT,
- .name = "AMD AM29LV800BT",
- .size = 0x00100000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
- { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29F800BT,
- .name = "AMD AM29F800BT",
- .size = 0x00100000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
- { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29LV800BB,
- .name = "AMD AM29LV800BB",
- .size = 0x00100000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
- { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
- }
- }, {
- .mfr_id = MANUFACTURER_FUJITSU,
- .dev_id = MBM29LV800BB,
- .name = "Fujitsu MBM29LV800BB",
- .size = 0x00100000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
- { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
- }
- }, {
- .mfr_id = MANUFACTURER_ST,
- .dev_id = M29W800T,
- .name = "ST M29W800T",
- .size = 0x00100000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
- { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
- }
- }, {
- .mfr_id = MANUFACTURER_ST,
- .dev_id = M29W160DT,
- .name = "ST M29W160DT",
- .size = 0x00200000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
- { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
- }
- }, {
- .mfr_id = MANUFACTURER_ST,
- .dev_id = M29W160DB,
- .name = "ST M29W160DB",
- .size = 0x00200000,
- .numeraseregions = 4,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
- { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
- { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
- { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
- }
- }, {
- .mfr_id = MANUFACTURER_AMD,
- .dev_id = AM29BDS323D,
- .name = "AMD AM29BDS323D",
- .size = 0x00400000,
- .numeraseregions = 3,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
- { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
- { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 },
- }
- }, {
- .mfr_id = MANUFACTURER_ATMEL,
- .dev_id = AT49xV16x,
- .name = "Atmel AT49xV16x",
- .size = 0x00200000,
- .numeraseregions = 2,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 },
- { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
- }
- }, {
- .mfr_id = MANUFACTURER_ATMEL,
- .dev_id = AT49xV16xT,
- .name = "Atmel AT49xV16xT",
- .size = 0x00200000,
- .numeraseregions = 2,
- .regions = {
- { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
- { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
- }
- }
- };
-
- struct mtd_info *mtd;
- struct flchip chips[MAX_AMD_CHIPS];
- int table_pos[MAX_AMD_CHIPS];
- struct amd_flash_private temp;
- struct amd_flash_private *private;
- u_long size;
- unsigned long base;
- int i;
- int reg_idx;
- int offset;
-
- mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_WARNING
- "%s: kmalloc failed for info structure\n", map->name);
- return NULL;
- }
- mtd->priv = map;
-
- memset(&temp, 0, sizeof(temp));
-
- printk("%s: Probing for AMD compatible flash...\n", map->name);
-
- if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
- ARRAY_SIZE(table)))
- == -1) {
- printk(KERN_WARNING
- "%s: Found no AMD compatible device at location zero\n",
- map->name);
- kfree(mtd);
-
- return NULL;
- }
-
- chips[0].start = 0;
- chips[0].state = FL_READY;
- chips[0].mutex = &chips[0]._spinlock;
- temp.numchips = 1;
- for (size = mtd->size; size > 1; size >>= 1) {
- temp.chipshift++;
- }
- switch (temp.interleave) {
- case 2:
- temp.chipshift += 1;
- break;
- case 4:
- temp.chipshift += 2;
- break;
- }
-
- /* Find out if there are any more chips in the map. */
- for (base = (1 << temp.chipshift);
- base < map->size;
- base += (1 << temp.chipshift)) {
- int numchips = temp.numchips;
- table_pos[numchips] = probe_new_chip(mtd, base, chips,
- &temp, table, ARRAY_SIZE(table));
- }
-
- mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
- mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
- printk(KERN_WARNING "%s: Failed to allocate "
- "memory for MTD erase region info\n", map->name);
- kfree(mtd);
- map->fldrv_priv = NULL;
- return NULL;
- }
-
- reg_idx = 0;
- offset = 0;
- for (i = 0; i < temp.numchips; i++) {
- int dev_size;
- int j;
-
- dev_size = 0;
- for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
- mtd->eraseregions[reg_idx].offset = offset +
- (table[table_pos[i]].regions[j].offset *
- temp.interleave);
- mtd->eraseregions[reg_idx].erasesize =
- table[table_pos[i]].regions[j].erasesize *
- temp.interleave;
- mtd->eraseregions[reg_idx].numblocks =
- table[table_pos[i]].regions[j].numblocks;
- if (mtd->erasesize <
- mtd->eraseregions[reg_idx].erasesize) {
- mtd->erasesize =
- mtd->eraseregions[reg_idx].erasesize;
- }
- dev_size += mtd->eraseregions[reg_idx].erasesize *
- mtd->eraseregions[reg_idx].numblocks;
- reg_idx++;
- }
- offset += dev_size;
- }
- mtd->type = MTD_NORFLASH;
- mtd->writesize = 1;
- mtd->flags = MTD_CAP_NORFLASH;
- mtd->name = map->name;
- mtd->erase = amd_flash_erase;
- mtd->read = amd_flash_read;
- mtd->write = amd_flash_write;
- mtd->sync = amd_flash_sync;
- mtd->suspend = amd_flash_suspend;
- mtd->resume = amd_flash_resume;
- mtd->lock = amd_flash_lock;
- mtd->unlock = amd_flash_unlock;
-
- private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
- temp.numchips), GFP_KERNEL);
- if (!private) {
- printk(KERN_WARNING
- "%s: kmalloc failed for private structure\n", map->name);
- kfree(mtd);
- map->fldrv_priv = NULL;
- return NULL;
- }
- memcpy(private, &temp, sizeof(temp));
- memcpy(private->chips, chips,
- sizeof(struct flchip) * private->numchips);
- for (i = 0; i < private->numchips; i++) {
- init_waitqueue_head(&private->chips[i].wq);
- spin_lock_init(&private->chips[i]._spinlock);
- }
-
- map->fldrv_priv = private;
-
- map->fldrv = &amd_flash_chipdrv;
-
- __module_get(THIS_MODULE);
- return mtd;
-}
-
-
-
-static inline int read_one_chip(struct map_info *map, struct flchip *chip,
- loff_t adr, size_t len, u_char *buf)
-{
- DECLARE_WAITQUEUE(wait, current);
- unsigned long timeo = jiffies + HZ;
-
-retry:
- spin_lock_bh(chip->mutex);
-
- if (chip->state != FL_READY){
- printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
- map->name, chip->state);
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
-
- spin_unlock_bh(chip->mutex);
-
- schedule();
- remove_wait_queue(&chip->wq, &wait);
-
- if(signal_pending(current)) {
- return -EINTR;
- }
-
- timeo = jiffies + HZ;
-
- goto retry;
- }
-
- adr += chip->start;
-
- chip->state = FL_READY;
-
- map_copy_from(map, buf, adr, len);
-
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
-
- return 0;
-}
-
-
-
-static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct map_info *map = mtd->priv;
- struct amd_flash_private *private = map->fldrv_priv;
- unsigned long ofs;
- int chipnum;
- int ret = 0;
-
- if ((from + len) > mtd->size) {
- printk(KERN_WARNING "%s: read request past end of device "
- "(0x%lx)\n", map->name, (unsigned long)from + len);
-
- return -EINVAL;
- }
-
- /* Offset within the first chip that the first read should start. */
- chipnum = (from >> private->chipshift);
- ofs = from - (chipnum << private->chipshift);
-
- *retlen = 0;
-
- while (len) {
- unsigned long this_len;
-
- if (chipnum >= private->numchips) {
- break;
- }
-
- if ((len + ofs - 1) >> private->chipshift) {
- this_len = (1 << private->chipshift) - ofs;
- } else {
- this_len = len;
- }
-
- ret = read_one_chip(map, &private->chips[chipnum], ofs,
- this_len, buf);
- if (ret) {
- break;
- }
-
- *retlen += this_len;
- len -= this_len;
- buf += this_len;
-
- ofs = 0;
- chipnum++;
- }
-
- return ret;
-}
-
-
-
-static int write_one_word(struct map_info *map, struct flchip *chip,
- unsigned long adr, __u32 datum)
-{
- unsigned long timeo = jiffies + HZ;
- struct amd_flash_private *private = map->fldrv_priv;
- DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
- int times_left;
-
-retry:
- spin_lock_bh(chip->mutex);
-
- if (chip->state != FL_READY){
- printk("%s: waiting for chip to write, state = %d\n",
- map->name, chip->state);
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
-
- spin_unlock_bh(chip->mutex);
-
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- printk(KERN_INFO "%s: woke up to write\n", map->name);
- if(signal_pending(current))
- return -EINTR;
-
- timeo = jiffies + HZ;
-
- goto retry;
- }
-
- chip->state = FL_WRITING;
-
- adr += chip->start;
- ENABLE_VPP(map);
- send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
- wide_write(map, datum, adr);
-
- times_left = 500000;
- while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
- if (need_resched()) {
- spin_unlock_bh(chip->mutex);
- schedule();
- spin_lock_bh(chip->mutex);
- }
- }
-
- if (!times_left) {
- printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
- map->name, adr);
- ret = -EIO;
- } else {
- __u32 verify;
- if ((verify = wide_read(map, adr)) != datum) {
- printk(KERN_WARNING "%s: write to 0x%lx failed. "
- "datum = %x, verify = %x\n",
- map->name, adr, datum, verify);
- ret = -EIO;
- }
- }
-
- DISABLE_VPP(map);
- chip->state = FL_READY;
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
-
- return ret;
-}
-
-
-
-static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct map_info *map = mtd->priv;
- struct amd_flash_private *private = map->fldrv_priv;
- int ret = 0;
- int chipnum;
- unsigned long ofs;
- unsigned long chipstart;
-
- *retlen = 0;
- if (!len) {
- return 0;
- }
-
- chipnum = to >> private->chipshift;
- ofs = to - (chipnum << private->chipshift);
- chipstart = private->chips[chipnum].start;
-
- /* If it's not bus-aligned, do the first byte write. */
- if (ofs & (map->buswidth - 1)) {
- unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
- int i = ofs - bus_ofs;
- int n = 0;
- u_char tmp_buf[4];
- __u32 datum;
-
- map_copy_from(map, tmp_buf,
- bus_ofs + private->chips[chipnum].start,
- map->buswidth);
- while (len && i < map->buswidth)
- tmp_buf[i++] = buf[n++], len--;
-
- if (map->buswidth == 2) {
- datum = *(__u16*)tmp_buf;
- } else if (map->buswidth == 4) {
- datum = *(__u32*)tmp_buf;
- } else {
- return -EINVAL; /* should never happen, but be safe */
- }
-
- ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
- datum);
- if (ret) {
- return ret;
- }
-
- ofs += n;
- buf += n;
- (*retlen) += n;
-
- if (ofs >> private->chipshift) {
- chipnum++;
- ofs = 0;
- if (chipnum == private->numchips) {
- return 0;
- }
- }
- }
-
- /* We are now aligned, write as much as possible. */
- while(len >= map->buswidth) {
- __u32 datum;
-
- if (map->buswidth == 1) {
- datum = *(__u8*)buf;
- } else if (map->buswidth == 2) {
- datum = *(__u16*)buf;
- } else if (map->buswidth == 4) {
- datum = *(__u32*)buf;
- } else {
- return -EINVAL;
- }
-
- ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
-
- if (ret) {
- return ret;
- }
-
- ofs += map->buswidth;
- buf += map->buswidth;
- (*retlen) += map->buswidth;
- len -= map->buswidth;
-
- if (ofs >> private->chipshift) {
- chipnum++;
- ofs = 0;
- if (chipnum == private->numchips) {
- return 0;
- }
- chipstart = private->chips[chipnum].start;
- }
- }
-
- if (len & (map->buswidth - 1)) {
- int i = 0, n = 0;
- u_char tmp_buf[2];
- __u32 datum;
-
- map_copy_from(map, tmp_buf,
- ofs + private->chips[chipnum].start,
- map->buswidth);
- while (len--) {
- tmp_buf[i++] = buf[n++];
- }
-
- if (map->buswidth == 2) {
- datum = *(__u16*)tmp_buf;
- } else if (map->buswidth == 4) {
- datum = *(__u32*)tmp_buf;
- } else {
- return -EINVAL; /* should never happen, but be safe */
- }
-
- ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
-
- if (ret) {
- return ret;
- }
-
- (*retlen) += n;
- }
-
- return 0;
-}
-
-
-
-static inline int erase_one_block(struct map_info *map, struct flchip *chip,
- unsigned long adr, u_long size)
-{
- unsigned long timeo = jiffies + HZ;
- struct amd_flash_private *private = map->fldrv_priv;
- DECLARE_WAITQUEUE(wait, current);
-
-retry:
- spin_lock_bh(chip->mutex);
-
- if (chip->state != FL_READY){
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
-
- spin_unlock_bh(chip->mutex);
-
- schedule();
- remove_wait_queue(&chip->wq, &wait);
-
- if (signal_pending(current)) {
- return -EINTR;
- }
-
- timeo = jiffies + HZ;
-
- goto retry;
- }
-
- chip->state = FL_ERASING;
-
- adr += chip->start;
- ENABLE_VPP(map);
- send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
- send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
-
- timeo = jiffies + (HZ * 20);
-
- spin_unlock_bh(chip->mutex);
- msleep(1000);
- spin_lock_bh(chip->mutex);
-
- while (flash_is_busy(map, adr, private->interleave)) {
-
- if (chip->state != FL_ERASING) {
- /* Someone's suspended the erase. Sleep */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
-
- spin_unlock_bh(chip->mutex);
- printk(KERN_INFO "%s: erase suspended. Sleeping\n",
- map->name);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
-
- if (signal_pending(current)) {
- return -EINTR;
- }
-
- timeo = jiffies + (HZ*2); /* FIXME */
- spin_lock_bh(chip->mutex);
- continue;
- }
-
- /* OK Still waiting */
- if (time_after(jiffies, timeo)) {
- chip->state = FL_READY;
- spin_unlock_bh(chip->mutex);
- printk(KERN_WARNING "%s: waiting for erase to complete "
- "timed out.\n", map->name);
- DISABLE_VPP(map);
-
- return -EIO;
- }
-
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
-
- if (need_resched())
- schedule();
- else
- udelay(1);
-
- spin_lock_bh(chip->mutex);
- }
-
- /* Verify every single word */
- {
- int address;
- int error = 0;
- __u8 verify;
-
- for (address = adr; address < (adr + size); address++) {
- if ((verify = map_read8(map, address)) != 0xFF) {
- error = 1;
- break;
- }
- }
- if (error) {
- chip->state = FL_READY;
- spin_unlock_bh(chip->mutex);
- printk(KERN_WARNING
- "%s: verify error at 0x%x, size %ld.\n",
- map->name, address, size);
- DISABLE_VPP(map);
-
- return -EIO;
- }
- }
-
- DISABLE_VPP(map);
- chip->state = FL_READY;
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
-
- return 0;
-}
-
-
-
-static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct map_info *map = mtd->priv;
- struct amd_flash_private *private = map->fldrv_priv;
- unsigned long adr, len;
- int chipnum;
- int ret = 0;
- int i;
- int first;
- struct mtd_erase_region_info *regions = mtd->eraseregions;
-
- if (instr->addr > mtd->size) {
- return -EINVAL;
- }
-
- if ((instr->len + instr->addr) > mtd->size) {
- return -EINVAL;
- }
-
- /* Check that both start and end of the requested erase are
- * aligned with the erasesize at the appropriate addresses.
- */
-
- i = 0;
-
- /* Skip all erase regions which are ended before the start of
- the requested erase. Actually, to save on the calculations,
- we skip to the first erase region which starts after the
- start of the requested erase, and then go back one.
- */
-
- while ((i < mtd->numeraseregions) &&
- (instr->addr >= regions[i].offset)) {
- i++;
- }
- i--;
-
- /* OK, now i is pointing at the erase region in which this
- * erase request starts. Check the start of the requested
- * erase range is aligned with the erase size which is in
- * effect here.
- */
-
- if (instr->addr & (regions[i].erasesize-1)) {
- return -EINVAL;
- }
-
- /* Remember the erase region we start on. */
-
- first = i;
-
- /* Next, check that the end of the requested erase is aligned
- * with the erase region at that address.
- */
-
- while ((i < mtd->numeraseregions) &&
- ((instr->addr + instr->len) >= regions[i].offset)) {
- i++;
- }
-
- /* As before, drop back one to point at the region in which
- * the address actually falls.
- */
-
- i--;
-
- if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
- return -EINVAL;
- }
-
- chipnum = instr->addr >> private->chipshift;
- adr = instr->addr - (chipnum << private->chipshift);
- len = instr->len;
-
- i = first;
-
- while (len) {
- ret = erase_one_block(map, &private->chips[chipnum], adr,
- regions[i].erasesize);
-
- if (ret) {
- return ret;
- }
-
- adr += regions[i].erasesize;
- len -= regions[i].erasesize;
-
- if ((adr % (1 << private->chipshift)) ==
- ((regions[i].offset + (regions[i].erasesize *
- regions[i].numblocks))
- % (1 << private->chipshift))) {
- i++;
- }
-
- if (adr >> private->chipshift) {
- adr = 0;
- chipnum++;
- if (chipnum >= private->numchips) {
- break;
- }
- }
- }
-
- instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-
-
-static void amd_flash_sync(struct mtd_info *mtd)
-{
- struct map_info *map = mtd->priv;
- struct amd_flash_private *private = map->fldrv_priv;
- int i;
- struct flchip *chip;
- int ret = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- for (i = 0; !ret && (i < private->numchips); i++) {
- chip = &private->chips[i];
-
- retry:
- spin_lock_bh(chip->mutex);
-
- switch(chip->state) {
- case FL_READY:
- case FL_STATUS:
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
- chip->oldstate = chip->state;
- chip->state = FL_SYNCING;
- /* No need to wake_up() on this state change -
- * as the whole point is that nobody can do anything
- * with the chip now anyway.
- */
- case FL_SYNCING:
- spin_unlock_bh(chip->mutex);
- break;
-
- default:
- /* Not an idle state */
- add_wait_queue(&chip->wq, &wait);
-
- spin_unlock_bh(chip->mutex);
-
- schedule();
-
- remove_wait_queue(&chip->wq, &wait);
-
- goto retry;
- }
- }
-
- /* Unlock the chips again */
- for (i--; i >= 0; i--) {
- chip = &private->chips[i];
-
- spin_lock_bh(chip->mutex);
-
- if (chip->state == FL_SYNCING) {
- chip->state = chip->oldstate;
- wake_up(&chip->wq);
- }
- spin_unlock_bh(chip->mutex);
- }
-}
-
-
-
-static int amd_flash_suspend(struct mtd_info *mtd)
-{
-printk("amd_flash_suspend(): not implemented!\n");
- return -EINVAL;
-}
-
-
-
-static void amd_flash_resume(struct mtd_info *mtd)
-{
-printk("amd_flash_resume(): not implemented!\n");
-}
-
-
-
-static void amd_flash_destroy(struct mtd_info *mtd)
-{
- struct map_info *map = mtd->priv;
- struct amd_flash_private *private = map->fldrv_priv;
- kfree(private);
-}
-
-int __init amd_flash_init(void)
-{
- register_mtd_chip_driver(&amd_flash_chipdrv);
- return 0;
-}
-
-void __exit amd_flash_exit(void)
-{
- unregister_mtd_chip_driver(&amd_flash_chipdrv);
-}
-
-module_init(amd_flash_init);
-module_exit(amd_flash_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
-MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
deleted file mode 100644
index 14e57b2bf84..00000000000
--- a/drivers/mtd/chips/jedec.c
+++ /dev/null
@@ -1,935 +0,0 @@
-
-/* JEDEC Flash Interface.
- * This is an older type of interface for self programming flash. It is
- * commonly use in older AMD chips and is obsolete compared with CFI.
- * It is called JEDEC because the JEDEC association distributes the ID codes
- * for the chips.
- *
- * See the AMD flash databook for information on how to operate the interface.
- *
- * This code does not support anything wider than 8 bit flash chips, I am
- * not going to guess how to send commands to them, plus I expect they will
- * all speak CFI..
- *
- * $Id: jedec.c,v 1.22 2005/01/05 18:05:11 dwmw2 Exp $
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mtd/jedec.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/compatmac.h>
-
-static struct mtd_info *jedec_probe(struct map_info *);
-static int jedec_probe8(struct map_info *map,unsigned long base,
- struct jedec_private *priv);
-static int jedec_probe16(struct map_info *map,unsigned long base,
- struct jedec_private *priv);
-static int jedec_probe32(struct map_info *map,unsigned long base,
- struct jedec_private *priv);
-static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
- unsigned long len);
-static int flash_erase(struct mtd_info *mtd, struct erase_info *instr);
-static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, const u_char *buf);
-
-static unsigned long my_bank_size;
-
-/* Listing of parts and sizes. We need this table to learn the sector
- size of the chip and the total length */
-static const struct JEDECTable JEDEC_table[] = {
- {
- .jedec = 0x013D,
- .name = "AMD Am29F017D",
- .size = 2*1024*1024,
- .sectorsize = 64*1024,
- .capabilities = MTD_CAP_NORFLASH
- },
- {
- .jedec = 0x01AD,
- .name = "AMD Am29F016",
- .size = 2*1024*1024,
- .sectorsize = 64*1024,
- .capabilities = MTD_CAP_NORFLASH
- },
- {
- .jedec = 0x01D5,
- .name = "AMD Am29F080",
- .size = 1*1024*1024,
- .sectorsize = 64*1024,
- .capabilities = MTD_CAP_NORFLASH
- },
- {
- .jedec = 0x01A4,
- .name = "AMD Am29F040",
- .size = 512*1024,
- .sectorsize = 64*1024,
- .capabilities = MTD_CAP_NORFLASH
- },
- {
- .jedec = 0x20E3,
- .name = "AMD Am29W040B",
- .size = 512*1024,
- .sectorsize = 64*1024,
- .capabilities = MTD_CAP_NORFLASH
- },
- {
- .jedec = 0xC2AD,
- .name = "Macronix MX29F016",
- .size = 2*1024*1024,
- .sectorsize = 64*1024,
- .capabilities = MTD_CAP_NORFLASH
- },
- { .jedec = 0x0 }
-};
-
-static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id);
-static void jedec_sync(struct mtd_info *mtd) {};
-static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-
-static struct mtd_info *jedec_probe(struct map_info *map);
-
-
-
-static struct mtd_chip_driver jedec_chipdrv = {
- .probe = jedec_probe,
- .name = "jedec",
- .module = THIS_MODULE
-};
-
-/* Probe entry point */
-
-static struct mtd_info *jedec_probe(struct map_info *map)
-{
- struct mtd_info *MTD;
- struct jedec_private *priv;
- unsigned long Base;
- unsigned long SectorSize;
- unsigned count;
- unsigned I,Uniq;
- char Part[200];
- memset(&priv,0,sizeof(priv));
-
- MTD = kzalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL);
- if (!MTD)
- return NULL;
-
- priv = (struct jedec_private *)&MTD[1];
-
- my_bank_size = map->size;
-
- if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
- {
- printk("mtd: Increase MAX_JEDEC_CHIPS, too many banks.\n");
- kfree(MTD);
- return NULL;
- }
-
- for (Base = 0; Base < map->size; Base += my_bank_size)
- {
- // Perhaps zero could designate all tests?
- if (map->buswidth == 0)
- map->buswidth = 1;
-
- if (map->buswidth == 1){
- if (jedec_probe8(map,Base,priv) == 0) {
- printk("did recognize jedec chip\n");
- kfree(MTD);
- return NULL;
- }
- }
- if (map->buswidth == 2)
- jedec_probe16(map,Base,priv);
- if (map->buswidth == 4)
- jedec_probe32(map,Base,priv);
- }
-
- // Get the biggest sector size
- SectorSize = 0;
- for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
- {
- // printk("priv->chips[%d].jedec is %x\n",I,priv->chips[I].jedec);
- // printk("priv->chips[%d].sectorsize is %lx\n",I,priv->chips[I].sectorsize);
- if (priv->chips[I].sectorsize > SectorSize)
- SectorSize = priv->chips[I].sectorsize;
- }
-
- // Quickly ensure that the other sector sizes are factors of the largest
- for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
- {
- if ((SectorSize/priv->chips[I].sectorsize)*priv->chips[I].sectorsize != SectorSize)
- {
- printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
- kfree(MTD);
- return NULL;
- }
- }
-
- /* Generate a part name that includes the number of different chips and
- other configuration information */
- count = 1;
- strlcpy(Part,map->name,sizeof(Part)-10);
- strcat(Part," ");
- Uniq = 0;
- for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
- {
- const struct JEDECTable *JEDEC;
-
- if (priv->chips[I+1].jedec == priv->chips[I].jedec)
- {
- count++;
- continue;
- }
-
- // Locate the chip in the jedec table
- JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
- if (JEDEC == 0)
- {
- printk("mtd: Internal Error, JEDEC not set\n");
- kfree(MTD);
- return NULL;
- }
-
- if (Uniq != 0)
- strcat(Part,",");
- Uniq++;
-
- if (count != 1)
- sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name);
- else
- sprintf(Part+strlen(Part),"%s",JEDEC->name);
- if (strlen(Part) > sizeof(Part)*2/3)
- break;
- count = 1;
- }
-
-   /* Determine if the chips are organized in a linear fashion, or if there
-      are empty banks. Note: the last bank does not count here, only the
-      first banks are important. Holes on non-bank boundaries cannot exist
-      due to the way the detection algorithm works. */
- if (priv->size < my_bank_size)
- my_bank_size = priv->size;
- priv->is_banked = 0;
- //printk("priv->size is %x, my_bank_size is %x\n",priv->size,my_bank_size);
- //printk("priv->bank_fill[0] is %x\n",priv->bank_fill[0]);
- if (!priv->size) {
- printk("priv->size is zero\n");
- kfree(MTD);
- return NULL;
- }
- if (priv->size/my_bank_size) {
- if (priv->size/my_bank_size == 1) {
- priv->size = my_bank_size;
- }
- else {
- for (I = 0; I != priv->size/my_bank_size - 1; I++)
- {
- if (priv->bank_fill[I] != my_bank_size)
- priv->is_banked = 1;
-
-	    /* Even this could be eliminated, but new de-optimized read/write
-	       functions would have to be written */
- printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
- if (priv->bank_fill[I] != priv->bank_fill[0])
- {
- printk("mtd: Failed. Cannot handle unsymmetric banking\n");
- kfree(MTD);
- return NULL;
- }
- }
- }
- }
- if (priv->is_banked == 1)
- strcat(Part,", banked");
-
- // printk("Part: '%s'\n",Part);
-
- memset(MTD,0,sizeof(*MTD));
- // strlcpy(MTD->name,Part,sizeof(MTD->name));
- MTD->name = map->name;
- MTD->type = MTD_NORFLASH;
- MTD->flags = MTD_CAP_NORFLASH;
- MTD->writesize = 1;
- MTD->erasesize = SectorSize*(map->buswidth);
- // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
- MTD->size = priv->size;
- // printk("MTD->size is %x\n",(unsigned int)MTD->size);
- //MTD->module = THIS_MODULE; // ? Maybe this should be the low level module?
- MTD->erase = flash_erase;
- if (priv->is_banked == 1)
- MTD->read = jedec_read_banked;
- else
- MTD->read = jedec_read;
- MTD->write = flash_write;
- MTD->sync = jedec_sync;
- MTD->priv = map;
- map->fldrv_priv = priv;
- map->fldrv = &jedec_chipdrv;
- __module_get(THIS_MODULE);
- return MTD;
-}
-
-/* Helper for the JEDEC probe functions; valid JEDEC numbers all have odd parity */
-static int checkparity(u_char C)
-{
- u_char parity = 0;
- while (C != 0)
- {
- parity ^= C & 1;
- C >>= 1;
- }
-
- return parity == 1;
-}
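The odd-parity rule above is the first sanity filter the probe applies to raw IDs: every valid JEDEC manufacturer/device byte has an odd number of set bits. A minimal standalone sketch (hypothetical demo code, not part of the driver) exercising the same rule:

	#include <stdio.h>

	/* Same odd-parity test as checkparity() above, in a runnable demo. */
	static int odd_parity(unsigned char c)
	{
		unsigned char parity = 0;
		while (c != 0) {
			parity ^= c & 1;
			c >>= 1;
		}
		return parity == 1;
	}

	int main(void)
	{
		printf("0x01 -> %d\n", odd_parity(0x01)); /* AMD: one set bit -> valid */
		printf("0xC2 -> %d\n", odd_parity(0xC2)); /* Macronix: three set bits -> valid */
		printf("0x03 -> %d\n", odd_parity(0x03)); /* two set bits -> rejected */
		return 0;
	}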
-
-
-/* Take an array of JEDEC numbers that represent interleaved flash chips
-   and process them. Check to make sure they are good JEDEC numbers, look
-   them up and then add them to the chip list */
-static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
- unsigned long base,struct jedec_private *priv)
-{
- unsigned I,J;
- unsigned long Size;
- unsigned long SectorSize;
- const struct JEDECTable *JEDEC;
-
- // Test #2 JEDEC numbers exhibit odd parity
- for (I = 0; I != Count; I++)
- {
- if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0)
- return 0;
- }
-
- // Finally, just make sure all the chip sizes are the same
- JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
-
- if (JEDEC == 0)
- {
-      printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Id[0]);
- return 0;
- }
-
- Size = JEDEC->size;
- SectorSize = JEDEC->sectorsize;
-   for (I = 0; I != Count; I++)
-   {
-      JEDEC = jedec_idtoinf(Mfg[I],Id[I]);
-      if (JEDEC == 0)
-      {
-	 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[I],Id[I]);
-	 return 0;
-      }
-
- if (Size != JEDEC->size || SectorSize != JEDEC->sectorsize)
- {
-	 printk("mtd: Failed. Interleaved flash does not have matching characteristics\n");
- return 0;
- }
- }
-
- // Load the Chips
- for (I = 0; I != MAX_JEDEC_CHIPS; I++)
- {
- if (priv->chips[I].jedec == 0)
- break;
- }
-
- if (I + Count > MAX_JEDEC_CHIPS)
- {
- printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n");
- return 0;
- }
-
- // Add them to the table
- for (J = 0; J != Count; J++)
- {
- unsigned long Bank;
-
- JEDEC = jedec_idtoinf(Mfg[J],Id[J]);
- priv->chips[I].jedec = (Mfg[J] << 8) | Id[J];
- priv->chips[I].size = JEDEC->size;
- priv->chips[I].sectorsize = JEDEC->sectorsize;
- priv->chips[I].base = base + J;
- priv->chips[I].datashift = J*8;
- priv->chips[I].capabilities = JEDEC->capabilities;
- priv->chips[I].offset = priv->size + J;
-
-      // addrshift = log2(Count) (Count = number of interleaved chips)
- priv->chips[I].addrshift = 0;
- for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++);
-
- // Determine how filled this bank is.
- Bank = base & (~(my_bank_size-1));
- if (priv->bank_fill[Bank/my_bank_size] < base +
- (JEDEC->size << priv->chips[I].addrshift) - Bank)
- priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank;
- I++;
- }
-
- priv->size += priv->chips[I-1].size*Count;
-
- return priv->chips[I-1].size;
-}
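The addrshift computed near the end of handle_jedecs() is log2 of the interleave count: with Count chips sharing the bus, byte N of a single chip sits at bus offset N << addrshift. A small sketch of the same computation, assuming Count is a power of two as the probe paths guarantee:

	/* Sketch: addrshift = log2(count) for power-of-two count, mirroring the
	   shift loop in handle_jedecs() above. */
	static unsigned addrshift_of(unsigned long count)
	{
		unsigned shift = 0;

		for (; count != 1; count >>= 1)
			shift++;
		return shift;	/* count == 4 -> 2: chip byte N maps to bus N << 2 */
	}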
-
-/* Lookup the chip information from the JEDEC ID table. */
-static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id)
-{
- __u16 Id = (mfr << 8) | id;
- unsigned long I = 0;
- for (I = 0; JEDEC_table[I].jedec != 0; I++)
- if (JEDEC_table[I].jedec == Id)
- return JEDEC_table + I;
- return NULL;
-}
-
-// Look for flash using an 8 bit bus interface
-static int jedec_probe8(struct map_info *map,unsigned long base,
- struct jedec_private *priv)
-{
- #define flread(x) map_read8(map,base+x)
- #define flwrite(v,x) map_write8(map,v,base+x)
-
- const unsigned long AutoSel1 = 0xAA;
- const unsigned long AutoSel2 = 0x55;
- const unsigned long AutoSel3 = 0x90;
- const unsigned long Reset = 0xF0;
- __u32 OldVal;
- __u8 Mfg[1];
- __u8 Id[1];
- unsigned I;
- unsigned long Size;
-
- // Wait for any write/erase operation to settle
- OldVal = flread(base);
- for (I = 0; OldVal != flread(base) && I < 10000; I++)
- OldVal = flread(base);
-
- // Reset the chip
- flwrite(Reset,0x555);
-
- // Send the sequence
- flwrite(AutoSel1,0x555);
- flwrite(AutoSel2,0x2AA);
- flwrite(AutoSel3,0x555);
-
- // Get the JEDEC numbers
- Mfg[0] = flread(0);
- Id[0] = flread(1);
- // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]);
-
- Size = handle_jedecs(map,Mfg,Id,1,base,priv);
- // printk("handle_jedecs Size is %x\n",(unsigned int)Size);
- if (Size == 0)
- {
- flwrite(Reset,0x555);
- return 0;
- }
-
-
- // Reset.
- flwrite(Reset,0x555);
-
- return 1;
-
- #undef flread
- #undef flwrite
-}
-
-// Look for flash using a 16 bit bus interface (i.e. two interleaved 8-bit chips)
-static int jedec_probe16(struct map_info *map,unsigned long base,
-		  struct jedec_private *priv)
-{
-   /* 16-bit interleave probing was never implemented; always report no chip. */
-   return 0;
-}
-
-// Look for flash using a 32 bit bus interface (i.e. four interleaved 8-bit chips)
-static int jedec_probe32(struct map_info *map,unsigned long base,
- struct jedec_private *priv)
-{
- #define flread(x) map_read32(map,base+((x)<<2))
- #define flwrite(v,x) map_write32(map,v,base+((x)<<2))
-
- const unsigned long AutoSel1 = 0xAAAAAAAA;
- const unsigned long AutoSel2 = 0x55555555;
- const unsigned long AutoSel3 = 0x90909090;
- const unsigned long Reset = 0xF0F0F0F0;
- __u32 OldVal;
- __u8 Mfg[4];
- __u8 Id[4];
- unsigned I;
- unsigned long Size;
-
- // Wait for any write/erase operation to settle
- OldVal = flread(base);
- for (I = 0; OldVal != flread(base) && I < 10000; I++)
- OldVal = flread(base);
-
- // Reset the chip
- flwrite(Reset,0x555);
-
- // Send the sequence
- flwrite(AutoSel1,0x555);
- flwrite(AutoSel2,0x2AA);
- flwrite(AutoSel3,0x555);
-
- // Test #1, JEDEC numbers are readable from 0x??00/0x??01
- if (flread(0) != flread(0x100) ||
- flread(1) != flread(0x101))
- {
- flwrite(Reset,0x555);
- return 0;
- }
-
- // Split up the JEDEC numbers
- OldVal = flread(0);
- for (I = 0; I != 4; I++)
- Mfg[I] = (OldVal >> (I*8));
- OldVal = flread(1);
- for (I = 0; I != 4; I++)
- Id[I] = (OldVal >> (I*8));
-
- Size = handle_jedecs(map,Mfg,Id,4,base,priv);
- if (Size == 0)
- {
- flwrite(Reset,0x555);
- return 0;
- }
-
-   /* Check for address wrap-around within a single bank: if this probe
-      still returns JEDEC numbers then we assume the address space has
-      wrapped. Notice we call this routine with the JEDEC autoselect mode
-      still enabled, so if two or more flashes have a truncated address
-      space the probe test will still work */
- if (base + (Size<<2)+0x555 < map->size &&
- base + (Size<<2)+0x555 < (base & (~(my_bank_size-1))) + my_bank_size)
- {
-      if (flread(Size) != flread(Size + 0x100) ||
-	  flread(Size + 1) != flread(Size + 0x101))
- {
- jedec_probe32(map,base+Size,priv);
- }
- }
-
- // Reset.
- flwrite(0xF0F0F0F0,0x555);
-
- return 1;
-
- #undef flread
- #undef flwrite
-}
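The 32-bit probe above replicates each 8-bit autoselect command (0xAA, 0x55, 0x90) into all four byte lanes and scales the cycle addresses by the bus width, so every interleaved chip latches the same unlock sequence. A one-liner sketch of the lane replication (hypothetical helper, not in the driver):

	/* Replicate an 8-bit flash command across a 32-bit bus so that all four
	   interleaved chips see the same cycle. */
	static unsigned long replicate8(unsigned char cmd)
	{
		return cmd * 0x01010101UL;	/* e.g. 0x90 -> 0x90909090 */
	}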
-
-/* Linear read. */
-static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct map_info *map = mtd->priv;
-
- map_copy_from(map, buf, from, len);
- *retlen = len;
- return 0;
-}
-
-/* Banked read. Take special care to jump past the holes in the bank
-   mapping. This version assumes symmetry in the holes. */
-static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct map_info *map = mtd->priv;
- struct jedec_private *priv = map->fldrv_priv;
-
- *retlen = 0;
- while (len > 0)
- {
- // Determine what bank and offset into that bank the first byte is
- unsigned long bank = from & (~(priv->bank_fill[0]-1));
- unsigned long offset = from & (priv->bank_fill[0]-1);
- unsigned long get = len;
- if (priv->bank_fill[0] - offset < len)
- get = priv->bank_fill[0] - offset;
-
- bank /= priv->bank_fill[0];
- map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
-
- len -= get;
- *retlen += get;
- from += get;
- }
- return 0;
-}
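The masks above rely on bank_fill[0] (the filled bytes per bank) being a power of two, so a logical address splits into a bank number and an in-bank offset before the hole at the end of each bank is skipped. A minimal sketch of that translation under the same assumption; bank_size stands in for my_bank_size:

	/* Sketch of the banked address translation used by jedec_read_banked(). */
	static unsigned long banked_to_device(unsigned long from, unsigned long fill,
					      unsigned long bank_size)
	{
		unsigned long bank   = (from & ~(fill - 1)) / fill;	/* logical bank # */
		unsigned long offset = from & (fill - 1);		/* offset within it */

		return bank * bank_size + offset;	/* jump past the per-bank hole */
	}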
-
-/* Pass the status value that the flash returned before it re-entered read
-   mode. */
-static void jedec_flash_failed(unsigned char code)
-{
- /* Bit 5 being high indicates that there was an internal device
- failure, erasure time limits exceeded or something */
- if ((code & (1 << 5)) != 0)
- {
- printk("mtd: Internal Flash failure\n");
- return;
- }
- printk("mtd: Programming didn't take\n");
-}
-
-/* This uses the erasure function described in the AMD Flash Handbook;
-   it will work for flashes with a fixed sector size only. Flashes with
-   a selection of sector sizes (e.g. the AMD Am29F800B) will need a different
-   routine. This routine tries to parallelize erasing multiple chips/sectors
-   where possible */
-static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- // Does IO to the currently selected chip
- #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift))
- #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift))
-
- unsigned long Time = 0;
- unsigned long NoTime = 0;
- unsigned long start = instr->addr, len = instr->len;
- unsigned int I;
- struct map_info *map = mtd->priv;
- struct jedec_private *priv = map->fldrv_priv;
-
- // Verify the arguments..
- if (start + len > mtd->size ||
- (start % mtd->erasesize) != 0 ||
- (len % mtd->erasesize) != 0 ||
- (len/mtd->erasesize) == 0)
- return -EINVAL;
-
- jedec_flash_chip_scan(priv,start,len);
-
- // Start the erase sequence on each chip
-   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
- {
- unsigned long off;
- struct jedec_flash_chip *chip = priv->chips + I;
-
- if (chip->length == 0)
- continue;
-
-      if (chip->start + chip->length > chip->size)
-      {
-	 printk("mtd: Internal error: erase region exceeds chip size\n");
-	 return -EIO;
-      }
-
- flwrite(0xF0,chip->start + 0x555);
- flwrite(0xAA,chip->start + 0x555);
- flwrite(0x55,chip->start + 0x2AA);
- flwrite(0x80,chip->start + 0x555);
- flwrite(0xAA,chip->start + 0x555);
- flwrite(0x55,chip->start + 0x2AA);
-
- /* Once we start selecting the erase sectors the delay between each
- command must not exceed 50us or it will immediately start erasing
- and ignore the other sectors */
- for (off = 0; off < len; off += chip->sectorsize)
- {
- // Check to make sure we didn't timeout
- flwrite(0x30,chip->start + off);
- if (off == 0)
- continue;
- if ((flread(chip->start + off) & (1 << 3)) != 0)
- {
- printk("mtd: Ack! We timed out the erase timer!\n");
- return -EIO;
- }
- }
- }
-
-   /* We could split this into a timer routine and return early, performing
-      background erasure. Maybe later, if the need warrants */
-
- /* Poll the flash for erasure completion, specs say this can take as long
- as 480 seconds to do all the sectors (for a 2 meg flash).
- Erasure time is dependent on chip age, temp and wear.. */
-
-   /* This being a generic routine, it assumes a 32 bit bus. It does 32-bit
-      reads and bundles interleaved chips into the same grouping. This will
-      work for all bus widths */
- Time = 0;
- NoTime = 0;
-   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
- {
- struct jedec_flash_chip *chip = priv->chips + I;
- unsigned long off = 0;
- unsigned todo[4] = {0,0,0,0};
- unsigned todo_left = 0;
- unsigned J;
-
- if (chip->length == 0)
- continue;
-
-      /* Find all chips in this data line; realistically this is all
-         or nothing up to the interleave count */
-      for (J = 0; J < MAX_JEDEC_CHIPS && priv->chips[J].jedec != 0; J++)
- {
- if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
- (chip->base & (~((1<<chip->addrshift)-1))))
- {
- todo_left++;
- todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1;
- }
- }
-
- /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1],
- (short)todo[2],(short)todo[3]);
- */
- while (1)
- {
- __u32 Last[4];
- unsigned long Count = 0;
-
- /* During erase bit 7 is held low and bit 6 toggles, we watch this,
- should it stop toggling or go high then the erase is completed,
- or this is not really flash ;> */
- switch (map->buswidth) {
- case 1:
- Last[0] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[1] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[2] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
- break;
- case 2:
- Last[0] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[1] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[2] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
- break;
-	    case 4:
- Last[0] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[1] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[2] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
- break;
- }
- Count = 3;
- while (todo_left != 0)
- {
- for (J = 0; J != 4; J++)
- {
- __u8 Byte1 = (Last[(Count-1)%4] >> (J*8)) & 0xFF;
- __u8 Byte2 = (Last[(Count-2)%4] >> (J*8)) & 0xFF;
- __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF;
- if (todo[J] == 0)
- continue;
-
- if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2)
- {
-// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2);
- continue;
- }
-
- if (Byte1 == Byte2)
- {
- jedec_flash_failed(Byte3);
- return -EIO;
- }
-
- todo[J] = 0;
- todo_left--;
- }
-
-/* if (NoTime == 0)
- Time += HZ/10 - schedule_timeout(HZ/10);*/
- NoTime = 0;
-
- switch (map->buswidth) {
- case 1:
- Last[Count % 4] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
- break;
- case 2:
- Last[Count % 4] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
- break;
- case 4:
- Last[Count % 4] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
- break;
- }
- Count++;
-
-/* // Count time, max of 15s per sector (according to AMD)
- if (Time > 15*len/mtd->erasesize*HZ)
- {
- printk("mtd: Flash Erase Timed out\n");
- return -EIO;
- } */
- }
-
- // Skip to the next chip if we used chip erase
- if (chip->length == chip->size)
- off = chip->size;
- else
- off += chip->sectorsize;
-
- if (off >= chip->length)
- break;
- NoTime = 1;
- }
-
-      for (J = 0; J < MAX_JEDEC_CHIPS && priv->chips[J].jedec != 0; J++)
- {
- if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
- (chip->base & (~((1<<chip->addrshift)-1))))
- priv->chips[J].length = 0;
- }
- }
-
- //printk("done\n");
- instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
- return 0;
-
- #undef flread
- #undef flwrite
-}
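The completion test above is the classic AMD toggle-bit algorithm: while an embedded erase runs, DQ7 reads back low and DQ6 flips on every read; two successive reads that agree mean the operation has ended, and DQ5 (checked by jedec_flash_failed()) then separates success from a timeout. A minimal sketch of the same test for one 8-bit chip, where read_byte() is a hypothetical accessor standing in for map_read8():

	extern unsigned char read_byte(unsigned long addr);	/* hypothetical */

	/* DQ6 toggle polling: the erase is finished once two consecutive
	   status reads match in bit 6. */
	static int erase_finished(unsigned long addr)
	{
		unsigned char a = read_byte(addr);
		unsigned char b = read_byte(addr);

		return ((a ^ b) & (1 << 6)) == 0;	/* DQ6 stopped toggling */
	}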
-
-/* This is the simple flash writing function. It writes to every byte, in
-   sequence. It takes care of how to properly address the flash if
-   the flash is interleaved. It can only be used if all the chips in the
-   array are identical! */
-static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, const u_char *buf)
-{
-   /* Does IO to the currently selected chip. It takes the bank addressing
-      base (which is divisible by the chip size), adds the necessary lower
-      bits of addrshift (the interleave index) and then adds the control
-      register index. */
- #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
- #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
-
- struct map_info *map = mtd->priv;
- struct jedec_private *priv = map->fldrv_priv;
- unsigned long base;
- unsigned long off;
- size_t save_len = len;
-
- if (start + len > mtd->size)
- return -EIO;
-
- //printk("Here");
-
- //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len);
- while (len != 0)
- {
- struct jedec_flash_chip *chip = priv->chips;
- unsigned long bank;
- unsigned long boffset;
-
- // Compute the base of the flash.
- off = ((unsigned long)start) % (chip->size << chip->addrshift);
- base = start - off;
-
- // Perform banked addressing translation.
- bank = base & (~(priv->bank_fill[0]-1));
- boffset = base & (priv->bank_fill[0]-1);
- bank = (bank/priv->bank_fill[0])*my_bank_size;
- base = bank + boffset;
-
-      // printk("Flashing %X %X %X\n",base,chip->size,len);
- // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift);
-
- // Loop over this page
- for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++)
- {
- unsigned char oldbyte = map_read8(map,base+off);
- unsigned char Last[4];
- unsigned long Count = 0;
-
- if (oldbyte == *buf) {
- // printk("oldbyte and *buf is %x,len is %x\n",oldbyte,len);
- continue;
- }
- if (((~oldbyte) & *buf) != 0)
- printk("mtd: warn: Trying to set a 0 to a 1\n");
-
- // Write
- flwrite(0xAA,0x555);
- flwrite(0x55,0x2AA);
- flwrite(0xA0,0x555);
- map_write8(map,*buf,base + off);
- Last[0] = map_read8(map,base + off);
- Last[1] = map_read8(map,base + off);
- Last[2] = map_read8(map,base + off);
-
- /* Wait for the flash to finish the operation. We store the last 4
- status bytes that have been retrieved so we can determine why
- it failed. The toggle bits keep toggling when there is a
- failure */
- for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] &&
- Count < 10000; Count++)
- Last[Count % 4] = map_read8(map,base + off);
- if (Last[(Count - 1) % 4] != *buf)
- {
- jedec_flash_failed(Last[(Count - 3) % 4]);
- return -EIO;
- }
- }
- }
- *retlen = save_len;
- return 0;
-}
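The warning above reflects the NOR rule the whole write loop depends on: a program cycle can only clear bits (1 -> 0), so raising any bit back to 1 requires a sector erase first. The check reduces to one expression:

	/* Sketch: a NOR program cycle cannot take any bit from 0 back to 1. */
	static int write_needs_erase(unsigned char oldbyte, unsigned char newbyte)
	{
		return ((~oldbyte) & newbyte) != 0;	/* some bit would have to rise */
	}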
-
-/* This is used to enhance the speed of the erase routine:
-   when things are being done to multiple chips it is possible to
-   parallelize the operations, and full-memory erases of multi-chip
-   memories particularly benefit */
-static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
- unsigned long len)
-{
- unsigned int I;
-
- // Zero the records
-   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
- priv->chips[I].start = priv->chips[I].length = 0;
-
- // Intersect the region with each chip
-   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
- {
- struct jedec_flash_chip *chip = priv->chips + I;
- unsigned long ByteStart;
- unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift);
-
- // End is before this chip or the start is after it
- if (start+len < chip->offset ||
- ChipEndByte - (1 << chip->addrshift) < start)
- continue;
-
- if (start < chip->offset)
- {
- ByteStart = chip->offset;
- chip->start = 0;
- }
- else
- {
- chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift;
- ByteStart = start;
- }
-
- if (start + len >= ChipEndByte)
- chip->length = (ChipEndByte - ByteStart) >> chip->addrshift;
- else
- chip->length = (start + len - ByteStart + (1 << chip->addrshift)-1) >> chip->addrshift;
- }
-}
-
-int __init jedec_init(void)
-{
- register_mtd_chip_driver(&jedec_chipdrv);
- return 0;
-}
-
-static void __exit jedec_exit(void)
-{
- unregister_mtd_chip_driver(&jedec_chipdrv);
-}
-
-module_init(jedec_init);
-module_exit(jedec_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com> et al.");
-MODULE_DESCRIPTION("Old MTD chip driver for JEDEC-compliant flash chips");
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
deleted file mode 100644
index c9cd3d21ccf..00000000000
--- a/drivers/mtd/chips/sharp.c
+++ /dev/null
@@ -1,601 +0,0 @@
-/*
- * MTD chip driver for pre-CFI Sharp flash chips
- *
- * Copyright 2000,2001 David A. Schleef <ds@schleef.org>
- * 2000,2001 Lineo, Inc.
- *
- * $Id: sharp.c,v 1.17 2005/11/29 14:28:28 gleixner Exp $
- *
- * Devices supported:
- * LH28F016SCT Symmetrical block flash memory, 2Mx8
- * LH28F008SCT Symmetrical block flash memory, 1Mx8
- *
- * Documentation:
- * http://www.sharpmeg.com/datasheets/memic/flashcmp/
- * http://www.sharpmeg.com/datasheets/memic/flashcmp/01symf/16m/016sctl9.pdf
- *
- * Limitations:
- * This driver only supports 4x1 arrangement of chips.
- * Not tested on anything but PowerPC.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/cfi.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#define CMD_RESET 0xffffffff
-#define CMD_READ_ID 0x90909090
-#define CMD_READ_STATUS 0x70707070
-#define CMD_CLEAR_STATUS 0x50505050
-#define CMD_BLOCK_ERASE_1 0x20202020
-#define CMD_BLOCK_ERASE_2 0xd0d0d0d0
-#define CMD_BYTE_WRITE 0x40404040
-#define CMD_SUSPEND 0xb0b0b0b0
-#define CMD_RESUME 0xd0d0d0d0
-#define CMD_SET_BLOCK_LOCK_1 0x60606060
-#define CMD_SET_BLOCK_LOCK_2 0x01010101
-#define CMD_SET_MASTER_LOCK_1 0x60606060
-#define CMD_SET_MASTER_LOCK_2 0xf1f1f1f1
-#define CMD_CLEAR_BLOCK_LOCKS_1 0x60606060
-#define CMD_CLEAR_BLOCK_LOCKS_2 0xd0d0d0d0
-
-#define SR_READY 0x80808080 // 1 = ready
-#define SR_ERASE_SUSPEND 0x40404040 // 1 = block erase suspended
-#define SR_ERROR_ERASE 0x20202020 // 1 = error in block erase or clear lock bits
-#define SR_ERROR_WRITE 0x10101010 // 1 = error in byte write or set lock bit
-#define SR_VPP 0x08080808 // 1 = Vpp is low
-#define SR_WRITE_SUSPEND 0x04040404 // 1 = byte write suspended
-#define SR_PROTECT 0x02020202 // 1 = lock bit set
-#define SR_RESERVED 0x01010101
-
-#define SR_ERRORS (SR_ERROR_ERASE|SR_ERROR_WRITE|SR_VPP|SR_PROTECT)
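With four interleaved 8-bit chips on a 32-bit bus, each byte lane of a status read carries one chip's status register, which is why every SR_* mask above repeats its bit in all four lanes. A hedged sketch of a per-lane decode (hypothetical helper, not in the driver):

	/* Pick one interleaved chip's status byte out of the replicated word. */
	static int lane_ready(unsigned long status, int lane)	/* lane 0..3 */
	{
		return ((status >> (lane * 8)) & 0x80) != 0;	/* that chip's SR_READY */
	}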
-
-/* Configuration options */
-
-#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */
-
-static struct mtd_info *sharp_probe(struct map_info *);
-
-static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd);
-
-static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int sharp_write(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, const u_char *buf);
-static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr);
-static void sharp_sync(struct mtd_info *mtd);
-static int sharp_suspend(struct mtd_info *mtd);
-static void sharp_resume(struct mtd_info *mtd);
-static void sharp_destroy(struct mtd_info *mtd);
-
-static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
- unsigned long adr, __u32 datum);
-static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
- unsigned long adr);
-#ifdef AUTOUNLOCK
-static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
- unsigned long adr);
-#endif
-
-
-struct sharp_info{
- struct flchip *chip;
- int bogus;
- int chipshift;
- int numchips;
- struct flchip chips[1];
-};
-
-static void sharp_destroy(struct mtd_info *mtd);
-
-static struct mtd_chip_driver sharp_chipdrv = {
- .probe = sharp_probe,
- .destroy = sharp_destroy,
- .name = "sharp",
- .module = THIS_MODULE
-};
-
-
-static struct mtd_info *sharp_probe(struct map_info *map)
-{
- struct mtd_info *mtd = NULL;
- struct sharp_info *sharp = NULL;
- int width;
-
- mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
- if(!mtd)
- return NULL;
-
- sharp = kzalloc(sizeof(*sharp), GFP_KERNEL);
- if(!sharp) {
- kfree(mtd);
- return NULL;
- }
-
- width = sharp_probe_map(map,mtd);
- if(!width){
- kfree(mtd);
- kfree(sharp);
- return NULL;
- }
-
- mtd->priv = map;
- mtd->type = MTD_NORFLASH;
- mtd->erase = sharp_erase;
- mtd->read = sharp_read;
- mtd->write = sharp_write;
- mtd->sync = sharp_sync;
- mtd->suspend = sharp_suspend;
- mtd->resume = sharp_resume;
- mtd->flags = MTD_CAP_NORFLASH;
- mtd->writesize = 1;
- mtd->name = map->name;
-
- sharp->chipshift = 23;
- sharp->numchips = 1;
- sharp->chips[0].start = 0;
- sharp->chips[0].state = FL_READY;
- sharp->chips[0].mutex = &sharp->chips[0]._spinlock;
- sharp->chips[0].word_write_time = 0;
- init_waitqueue_head(&sharp->chips[0].wq);
- spin_lock_init(&sharp->chips[0]._spinlock);
-
- map->fldrv = &sharp_chipdrv;
- map->fldrv_priv = sharp;
-
- __module_get(THIS_MODULE);
- return mtd;
-}
-
-static inline void sharp_send_cmd(struct map_info *map, unsigned long cmd, unsigned long adr)
-{
- map_word map_cmd;
- map_cmd.x[0] = cmd;
- map_write(map, map_cmd, adr);
-}
-
-static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
-{
- map_word tmp, read0, read4;
- unsigned long base = 0;
- int width = 4;
-
- tmp = map_read(map, base+0);
-
- sharp_send_cmd(map, CMD_READ_ID, base+0);
-
- read0 = map_read(map, base+0);
- read4 = map_read(map, base+4);
- if(read0.x[0] == 0x89898989){
- printk("Looks like sharp flash\n");
- switch(read4.x[0]){
- case 0xaaaaaaaa:
- case 0xa0a0a0a0:
- /* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/
- /* a0 - LH28F016SCT-Z4 2Mx8, 32 64k blocks*/
- mtd->erasesize = 0x10000 * width;
- mtd->size = 0x200000 * width;
- return width;
- case 0xa6a6a6a6:
- /* a6 - LH28F008SCT-L12 1Mx8, 16 64k blocks*/
- /* a6 - LH28F008SCR-L85 1Mx8, 16 64k blocks*/
- mtd->erasesize = 0x10000 * width;
- mtd->size = 0x100000 * width;
- return width;
-#if 0
- case 0x00000000: /* unknown */
- /* XX - LH28F004SCT 512kx8, 8 64k blocks*/
- mtd->erasesize = 0x10000 * width;
- mtd->size = 0x80000 * width;
- return width;
-#endif
- default:
- printk("Sort-of looks like sharp flash, 0x%08lx 0x%08lx\n",
- read0.x[0], read4.x[0]);
- }
- }else if((map_read(map, base+0).x[0] == CMD_READ_ID)){
- /* RAM, probably */
- printk("Looks like RAM\n");
- map_write(map, tmp, base+0);
- }else{
- printk("Doesn't look like sharp flash, 0x%08lx 0x%08lx\n",
- read0.x[0], read4.x[0]);
- }
-
- return 0;
-}
-
-/* This function returns with the chip->mutex lock held. */
-static int sharp_wait(struct map_info *map, struct flchip *chip)
-{
- int i;
- map_word status;
- unsigned long timeo = jiffies + HZ;
- DECLARE_WAITQUEUE(wait, current);
- int adr = 0;
-
-retry:
- spin_lock_bh(chip->mutex);
-
- switch(chip->state){
-	case FL_READY:
-		sharp_send_cmd(map, CMD_READ_STATUS, adr);
-		chip->state = FL_STATUS;
-		/* fall through */
-	case FL_STATUS:
- for(i=0;i<100;i++){
- status = map_read(map, adr);
- if((status.x[0] & SR_READY)==SR_READY)
- break;
- udelay(1);
- }
- break;
- default:
- printk("Waiting for chip\n");
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
-
- spin_unlock_bh(chip->mutex);
-
- schedule();
- remove_wait_queue(&chip->wq, &wait);
-
- if(signal_pending(current))
- return -EINTR;
-
- timeo = jiffies + HZ;
-
- goto retry;
- }
-
- sharp_send_cmd(map, CMD_RESET, adr);
-
- chip->state = FL_READY;
-
- return 0;
-}
-
-static void sharp_release(struct flchip *chip)
-{
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
-}
-
-static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct map_info *map = mtd->priv;
- struct sharp_info *sharp = map->fldrv_priv;
- int chipnum;
- int ret = 0;
- int ofs = 0;
-
- chipnum = (from >> sharp->chipshift);
- ofs = from & ((1 << sharp->chipshift)-1);
-
- *retlen = 0;
-
- while(len){
- unsigned long thislen;
-
- if(chipnum>=sharp->numchips)
- break;
-
- thislen = len;
- if(ofs+thislen >= (1<<sharp->chipshift))
- thislen = (1<<sharp->chipshift) - ofs;
-
- ret = sharp_wait(map,&sharp->chips[chipnum]);
- if(ret<0)
- break;
-
- map_copy_from(map,buf,ofs,thislen);
-
- sharp_release(&sharp->chips[chipnum]);
-
- *retlen += thislen;
- len -= thislen;
- buf += thislen;
-
- ofs = 0;
- chipnum++;
- }
- return ret;
-}
-
-static int sharp_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct map_info *map = mtd->priv;
- struct sharp_info *sharp = map->fldrv_priv;
- int ret = 0;
- int i,j;
- int chipnum;
- unsigned long ofs;
- union { u32 l; unsigned char uc[4]; } tbuf;
-
- *retlen = 0;
-
- while(len){
- tbuf.l = 0xffffffff;
- chipnum = to >> sharp->chipshift;
- ofs = to & ((1<<sharp->chipshift)-1);
-
- j=0;
- for(i=ofs&3;i<4 && len;i++){
- tbuf.uc[i] = *buf;
- buf++;
- to++;
- len--;
- j++;
- }
-		ret = sharp_write_oneword(map, &sharp->chips[chipnum], ofs&~3, tbuf.l);
-		if(ret<0)
-			return ret;
- (*retlen)+=j;
- }
-
- return 0;
-}
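The union above implements a read-modify-write pad: byte lanes the caller did not supply stay 0xFF, and since a program cycle only pulls bits low, those lanes are left unchanged by the full-word write. A minimal sketch of the packing, assuming first + n never exceeds the 4-byte word:

	/* Sketch of the 0xFF-padded word packing used by sharp_write() above. */
	union wordbuf { unsigned int l; unsigned char uc[4]; };

	static unsigned int pack_partial(const unsigned char *src, int first, int n)
	{
		union wordbuf t = { .l = 0xffffffff };	/* untouched lanes program nothing */
		int i;

		for (i = 0; i < n; i++)			/* caller ensures first + n <= 4 */
			t.uc[first + i] = src[i];
		return t.l;
	}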
-
-static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
- unsigned long adr, __u32 datum)
-{
- int ret;
- int timeo;
- int try;
- int i;
- map_word data, status;
-
-	status.x[0] = 0;
-	ret = sharp_wait(map,chip);
-	if(ret<0)
-		return ret;
-
- for(try=0;try<10;try++){
- sharp_send_cmd(map, CMD_BYTE_WRITE, adr);
- /* cpu_to_le32 -> hack to fix the writel be->le conversion */
- data.x[0] = cpu_to_le32(datum);
- map_write(map, data, adr);
-
- chip->state = FL_WRITING;
-
- timeo = jiffies + (HZ/2);
-
- sharp_send_cmd(map, CMD_READ_STATUS, adr);
- for(i=0;i<100;i++){
- status = map_read(map, adr);
- if((status.x[0] & SR_READY) == SR_READY)
- break;
- }
- if(i==100){
- printk("sharp: timed out writing\n");
- }
-
- if(!(status.x[0] & SR_ERRORS))
- break;
-
- printk("sharp: error writing byte at addr=%08lx status=%08lx\n", adr, status.x[0]);
-
- sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
- }
- sharp_send_cmd(map, CMD_RESET, adr);
- chip->state = FL_READY;
-
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
-
- return 0;
-}
-
-static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct map_info *map = mtd->priv;
- struct sharp_info *sharp = map->fldrv_priv;
- unsigned long adr,len;
- int chipnum, ret=0;
-
-//printk("sharp_erase()\n");
- if(instr->addr & (mtd->erasesize - 1))
- return -EINVAL;
- if(instr->len & (mtd->erasesize - 1))
- return -EINVAL;
- if(instr->len + instr->addr > mtd->size)
- return -EINVAL;
-
- chipnum = instr->addr >> sharp->chipshift;
- adr = instr->addr & ((1<<sharp->chipshift)-1);
- len = instr->len;
-
- while(len){
- ret = sharp_erase_oneblock(map, &sharp->chips[chipnum], adr);
- if(ret)return ret;
-
- adr += mtd->erasesize;
- len -= mtd->erasesize;
- if(adr >> sharp->chipshift){
- adr = 0;
- chipnum++;
- if(chipnum>=sharp->numchips)
- break;
- }
- }
-
- instr->state = MTD_ERASE_DONE;
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
- unsigned long adr)
-{
- int ret;
- unsigned long timeo;
- map_word status;
- DECLARE_WAITQUEUE(wait, current);
-
- sharp_send_cmd(map, CMD_READ_STATUS, adr);
- status = map_read(map, adr);
-
- timeo = jiffies + HZ;
-
- while(time_before(jiffies, timeo)){
- sharp_send_cmd(map, CMD_READ_STATUS, adr);
- status = map_read(map, adr);
- if((status.x[0] & SR_READY)==SR_READY){
- ret = 0;
- goto out;
- }
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
-
- //spin_unlock_bh(chip->mutex);
-
- schedule_timeout(1);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
-
- //spin_lock_bh(chip->mutex);
-
- if (signal_pending(current)){
- ret = -EINTR;
- goto out;
- }
-
- }
- ret = -ETIME;
-out:
- return ret;
-}
-
-static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
- unsigned long adr)
-{
- int ret;
- //int timeo;
- map_word status;
- //int i;
-
-//printk("sharp_erase_oneblock()\n");
-
-#ifdef AUTOUNLOCK
- /* This seems like a good place to do an unlock */
- sharp_unlock_oneblock(map,chip,adr);
-#endif
-
- sharp_send_cmd(map, CMD_BLOCK_ERASE_1, adr);
- sharp_send_cmd(map, CMD_BLOCK_ERASE_2, adr);
-
- chip->state = FL_ERASING;
-
- ret = sharp_do_wait_for_ready(map,chip,adr);
- if(ret<0)return ret;
-
- sharp_send_cmd(map, CMD_READ_STATUS, adr);
- status = map_read(map, adr);
-
- if(!(status.x[0] & SR_ERRORS)){
- sharp_send_cmd(map, CMD_RESET, adr);
- chip->state = FL_READY;
- //spin_unlock_bh(chip->mutex);
- return 0;
- }
-
- printk("sharp: error erasing block at addr=%08lx status=%08lx\n", adr, status.x[0]);
- sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
-
- //spin_unlock_bh(chip->mutex);
-
- return -EIO;
-}
-
-#ifdef AUTOUNLOCK
-static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
- unsigned long adr)
-{
- int i;
- map_word status;
-
- sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_1, adr);
- sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_2, adr);
-
- udelay(100);
-
- status = map_read(map, adr);
- printk("status=%08lx\n", status.x[0]);
-
- for(i=0;i<1000;i++){
- //sharp_send_cmd(map, CMD_READ_STATUS, adr);
- status = map_read(map, adr);
- if((status.x[0] & SR_READY) == SR_READY)
- break;
- udelay(100);
- }
- if(i==1000){
- printk("sharp: timed out unlocking block\n");
- }
-
- if(!(status.x[0] & SR_ERRORS)){
- sharp_send_cmd(map, CMD_RESET, adr);
- chip->state = FL_READY;
- return;
- }
-
- printk("sharp: error unlocking block at addr=%08lx status=%08lx\n", adr, status.x[0]);
- sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
-}
-#endif
-
-static void sharp_sync(struct mtd_info *mtd)
-{
- //printk("sharp_sync()\n");
-}
-
-static int sharp_suspend(struct mtd_info *mtd)
-{
- printk("sharp_suspend()\n");
- return -EINVAL;
-}
-
-static void sharp_resume(struct mtd_info *mtd)
-{
- printk("sharp_resume()\n");
-
-}
-
-static void sharp_destroy(struct mtd_info *mtd)
-{
- printk("sharp_destroy()\n");
-
-}
-
-static int __init sharp_probe_init(void)
-{
- printk("MTD Sharp chip driver <ds@lineo.com>\n");
-
- register_mtd_chip_driver(&sharp_chipdrv);
-
- return 0;
-}
-
-static void __exit sharp_probe_exit(void)
-{
- unregister_mtd_chip_driver(&sharp_chipdrv);
-}
-
-module_init(sharp_probe_init);
-module_exit(sharp_probe_exit);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Schleef <ds@schleef.org>");
-MODULE_DESCRIPTION("Old MTD chip driver for pre-CFI Sharp flash chips");
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index fc4cc8ba9e2..be4b9948c76 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -373,7 +373,7 @@ static inline void kill_final_newline(char *str)
#ifndef MODULE
static int block2mtd_init_called = 0;
-static __initdata char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
+static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
#endif
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index d990d8141ef..b665e4ac220 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -60,7 +60,7 @@ config MTD_PHYSMAP_BANKWIDTH
(i.e., run-time calling physmap_configure()).
config MTD_PHYSMAP_OF
- tristate "Flash device in physical memory map based on OF descirption"
+ tristate "Flash device in physical memory map based on OF description"
depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
help
This provides a 'mapping' driver which allows the NOR Flash and
@@ -358,22 +358,6 @@ config MTD_CFI_FLAGADM
Mapping for the Flaga digital module. If you don't have one, ignore
this setting.
-config MTD_BEECH
- tristate "CFI Flash device mapped on IBM 405LP Beech"
- depends on MTD_CFI && BEECH
- help
- This enables access routines for the flash chips on the IBM
- 405LP Beech board. If you have one of these boards and would like
- to use the flash chips on it, say 'Y'.
-
-config MTD_ARCTIC
- tristate "CFI Flash device mapped on IBM 405LP Arctic"
- depends on MTD_CFI && ARCTIC2
- help
- This enables access routines for the flash chips on the IBM 405LP
- Arctic board. If you have one of these boards and would like to
- use the flash chips on it, say 'Y'.
-
config MTD_WALNUT
tristate "Flash device mapped on IBM 405GP Walnut"
depends on MTD_JEDECPROBE && WALNUT
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index de036c5e613..3acbb5d01ca 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -58,8 +58,6 @@ obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_EBONY) += ebony.o
obj-$(CONFIG_MTD_OCOTEA) += ocotea.o
-obj-$(CONFIG_MTD_BEECH) += beech-mtd.o
-obj-$(CONFIG_MTD_ARCTIC) += arctic-mtd.o
obj-$(CONFIG_MTD_WALNUT) += walnut.o
obj-$(CONFIG_MTD_H720X) += h720x-flash.o
obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
diff --git a/drivers/mtd/maps/arctic-mtd.c b/drivers/mtd/maps/arctic-mtd.c
deleted file mode 100644
index 2cc90243627..00000000000
--- a/drivers/mtd/maps/arctic-mtd.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * $Id: arctic-mtd.c,v 1.14 2005/11/07 11:14:26 gleixner Exp $
- *
- * drivers/mtd/maps/arctic-mtd.c MTD mappings and partition tables for
- * IBM 405LP Arctic boards.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) 2002, International Business Machines Corporation
- * All Rights Reserved.
- *
- * Bishop Brock
- * IBM Research, Austin Center for Low-Power Computing
- * bcbrock@us.ibm.com
- * March 2002
- *
- * modified for Arctic by,
- * David Gibson
- * IBM OzLabs, Canberra, Australia
- * <arctic@gibson.dropbear.id.au>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/ibm4xx.h>
-
-/*
- * 0 : 0xFE00 0000 - 0xFEFF FFFF : Filesystem 1 (16MiB)
- * 1 : 0xFF00 0000 - 0xFF4F FFFF : kernel (5.12MiB)
- * 2 : 0xFF50 0000 - 0xFFF5 FFFF : Filesystem 2 (10.624MiB) (if non-XIP)
- * 3 : 0xFFF6 0000 - 0xFFFF FFFF : PIBS Firmware (640KiB)
- */
-
-#define FFS1_SIZE 0x01000000 /* 16MiB */
-#define KERNEL_SIZE 0x00500000 /* 5.12MiB */
-#define FFS2_SIZE 0x00a60000 /* 10.624MiB */
-#define FIRMWARE_SIZE 0x000a0000 /* 640KiB */
-
-
-#define NAME "Arctic Linux Flash"
-#define PADDR SUBZERO_BOOTFLASH_PADDR
-#define BUSWIDTH 2
-#define SIZE SUBZERO_BOOTFLASH_SIZE
-#define PARTITIONS 4
-
-/* Flash memories on these boards are memory resources, accessed big-endian. */
-
-{
- /* do nothing for now */
-}
-
-static struct map_info arctic_mtd_map = {
- .name = NAME,
- .size = SIZE,
- .bankwidth = BUSWIDTH,
- .phys = PADDR,
-};
-
-static struct mtd_info *arctic_mtd;
-
-static struct mtd_partition arctic_partitions[PARTITIONS] = {
- { .name = "Filesystem",
- .size = FFS1_SIZE,
- .offset = 0,},
- { .name = "Kernel",
- .size = KERNEL_SIZE,
- .offset = FFS1_SIZE,},
- { .name = "Filesystem",
- .size = FFS2_SIZE,
- .offset = FFS1_SIZE + KERNEL_SIZE,},
- { .name = "Firmware",
- .size = FIRMWARE_SIZE,
- .offset = SUBZERO_BOOTFLASH_SIZE - FIRMWARE_SIZE,},
-};
-
-static int __init
-init_arctic_mtd(void)
-{
- int err;
-
- printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
-
- arctic_mtd_map.virt = ioremap(PADDR, SIZE);
-
- if (!arctic_mtd_map.virt) {
- printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
- return -EIO;
- }
- simple_map_init(&arctic_mtd_map);
-
- printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
- arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map);
-
- if (!arctic_mtd) {
- iounmap(arctic_mtd_map.virt);
- return -ENXIO;
- }
-
- arctic_mtd->owner = THIS_MODULE;
-
- err = add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS);
- if (err) {
- printk("%s: add_mtd_partitions failed\n", NAME);
- iounmap(arctic_mtd_map.virt);
- }
-
- return err;
-}
-
-static void __exit
-cleanup_arctic_mtd(void)
-{
- if (arctic_mtd) {
- del_mtd_partitions(arctic_mtd);
- map_destroy(arctic_mtd);
- iounmap((void *) arctic_mtd_map.virt);
- }
-}
-
-module_init(init_arctic_mtd);
-module_exit(cleanup_arctic_mtd);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Gibson <arctic@gibson.dropbear.id.au>");
-MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Arctic boards");
diff --git a/drivers/mtd/maps/beech-mtd.c b/drivers/mtd/maps/beech-mtd.c
deleted file mode 100644
index d76d5981b86..00000000000
--- a/drivers/mtd/maps/beech-mtd.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * $Id: beech-mtd.c,v 1.11 2005/11/07 11:14:26 gleixner Exp $
- *
- * drivers/mtd/maps/beech-mtd.c MTD mappings and partition tables for
- * IBM 405LP Beech boards.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) 2002, International Business Machines Corporation
- * All Rights Reserved.
- *
- * Bishop Brock
- * IBM Research, Austin Center for Low-Power Computing
- * bcbrock@us.ibm.com
- * March 2002
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/ibm4xx.h>
-
-#define NAME "Beech Linux Flash"
-#define PADDR BEECH_BIGFLASH_PADDR
-#define SIZE BEECH_BIGFLASH_SIZE
-#define BUSWIDTH 1
-
-/* Flash memories on these boards are memory resources, accessed big-endian. */
-
-
-static struct map_info beech_mtd_map = {
- .name = NAME,
- .size = SIZE,
- .bankwidth = BUSWIDTH,
- .phys = PADDR
-};
-
-static struct mtd_info *beech_mtd;
-
-static struct mtd_partition beech_partitions[2] = {
- {
- .name = "Linux Kernel",
- .size = BEECH_KERNEL_SIZE,
- .offset = BEECH_KERNEL_OFFSET
- }, {
- .name = "Free Area",
- .size = BEECH_FREE_AREA_SIZE,
- .offset = BEECH_FREE_AREA_OFFSET
- }
-};
-
-static int __init
-init_beech_mtd(void)
-{
- int err;
-
- printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
-
- beech_mtd_map.virt = ioremap(PADDR, SIZE);
-
- if (!beech_mtd_map.virt) {
- printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
- return -EIO;
- }
-
- simple_map_init(&beech_mtd_map);
-
- printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
- beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map);
-
- if (!beech_mtd) {
- iounmap(beech_mtd_map.virt);
- return -ENXIO;
- }
-
- beech_mtd->owner = THIS_MODULE;
-
- err = add_mtd_partitions(beech_mtd, beech_partitions, 2);
- if (err) {
- printk("%s: add_mtd_partitions failed\n", NAME);
- iounmap(beech_mtd_map.virt);
- }
-
- return err;
-}
-
-static void __exit
-cleanup_beech_mtd(void)
-{
- if (beech_mtd) {
- del_mtd_partitions(beech_mtd);
- map_destroy(beech_mtd);
- iounmap((void *) beech_mtd_map.virt);
- }
-}
-
-module_init(init_beech_mtd);
-module_exit(cleanup_beech_mtd);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bishop Brock <bcbrock@us.ibm.com>");
-MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Beech boards");
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 9f53c655af3..7b96cd02f82 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -358,7 +358,7 @@ int __init nettel_init(void)
/* Turn other PAR off so the first probe doesn't find it */
*intel1par = 0;
- /* Probe for the the size of the first Intel flash */
+ /* Probe for the size of the first Intel flash */
nettel_intel_map.size = maxsize;
nettel_intel_map.phys = intel0addr;
nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 72107dc06d6..bbb42c35b69 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -186,7 +186,7 @@ static int __devinit of_physmap_probe(struct of_device *dev, const struct of_dev
else {
if (strcmp(of_probe, "ROM"))
dev_dbg(&dev->dev, "map_probe: don't know probe type "
- "'%s', mapping as rom\n");
+ "'%s', mapping as rom\n", of_probe);
info->mtd = do_map_probe("mtd_rom", &info->map);
}
if (info->mtd == NULL) {
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 1af989023c6..9c623685294 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -347,7 +347,6 @@ int add_mtd_partitions(struct mtd_info *master,
slave->mtd.subpage_sft = master->subpage_sft;
slave->mtd.name = parts[i].name;
- slave->mtd.bank_size = master->bank_size;
slave->mtd.owner = master->owner;
slave->mtd.read = part_read;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index d05873b8c15..f1d60b6f048 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -232,11 +232,13 @@ config MTD_NAND_BASLER_EXCITE
will be named "excite_nandflash.ko".
config MTD_NAND_CAFE
- tristate "NAND support for OLPC CAFÉ chip"
- depends on PCI
- help
- Use NAND flash attached to the CAFÉ chip designed for the $100
- laptop.
+ tristate "NAND support for OLPC CAFÉ chip"
+ depends on PCI
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC16
+ help
+ Use NAND flash attached to the CAFÉ chip designed for the $100
+ laptop.
config MTD_NAND_CS553X
tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
@@ -270,4 +272,13 @@ config MTD_NAND_NANDSIM
The simulator may simulate various NAND flash chips for the
MTD nand layer.
+config MTD_NAND_PLATFORM
+ tristate "Support for generic platform NAND driver"
+ depends on MTD_NAND
+ help
+ This implements a generic NAND driver for on-SOC platform
+ devices. You will need to provide platform-specific functions
+ via platform_data.
+
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6872031a3fb..edba1db14bf 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -26,6 +26,6 @@ obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
+obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
nand-objs := nand_base.o nand_bbt.o
-cafe_nand-objs := cafe.o cafe_ecc.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index 14b80cc90a7..512e999177f 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -82,6 +82,10 @@ static void at91_nand_disable(struct at91_nand_host *host)
at91_set_gpio_value(host->board->enable_pin, 1);
}
+#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
/*
* Probe for the NAND device.
*/
@@ -151,6 +155,12 @@ static int __init at91_nand_probe(struct platform_device *pdev)
#ifdef CONFIG_MTD_PARTITIONS
if (host->board->partition_info)
partitions = host->board->partition_info(mtd->size, &num_partitions);
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ else {
+ mtd->name = "at91_nand";
+ num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
+ }
+#endif
if ((!partitions) || (num_partitions == 0)) {
		printk(KERN_ERR "at91_nand: No partitions defined, or unsupported device.\n");
diff --git a/drivers/mtd/nand/cafe_ecc.c b/drivers/mtd/nand/cafe_ecc.c
deleted file mode 100644
index ea5c8491d2c..00000000000
--- a/drivers/mtd/nand/cafe_ecc.c
+++ /dev/null
@@ -1,1381 +0,0 @@
-/* Error correction for CAFÉ NAND controller
- *
- * © 2006 Marvell, Inc.
- * Author: Tom Chiou
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-
-static unsigned short gf4096_mul(unsigned short, unsigned short);
-static unsigned short gf64_mul(unsigned short, unsigned short);
-static unsigned short gf4096_inv(unsigned short);
-static unsigned short err_pos(unsigned short);
-static void find_4bit_err_coefs(unsigned short, unsigned short, unsigned short,
- unsigned short, unsigned short, unsigned short,
- unsigned short, unsigned short, unsigned short *);
-static void zero_4x5_col3(unsigned short[4][5]);
-static void zero_4x5_col2(unsigned short[4][5]);
-static void zero_4x5_col1(unsigned short[4][5]);
-static void swap_4x5_rows(unsigned short[4][5], int, int, int);
-static void swap_2x3_rows(unsigned short m[2][3]);
-static void solve_4x5(unsigned short m[4][5], unsigned short *, int *);
-static void sort_coefs(int *, unsigned short *, int);
-static void find_4bit_err_pats(unsigned short, unsigned short, unsigned short,
- unsigned short, unsigned short, unsigned short,
- unsigned short, unsigned short, unsigned short *);
-static void find_3bit_err_coefs(unsigned short, unsigned short, unsigned short,
- unsigned short, unsigned short, unsigned short,
- unsigned short *);
-static void zero_3x4_col2(unsigned short[3][4]);
-static void zero_3x4_col1(unsigned short[3][4]);
-static void swap_3x4_rows(unsigned short[3][4], int, int, int);
-static void solve_3x4(unsigned short[3][4], unsigned short *, int *);
-static void find_3bit_err_pats(unsigned short, unsigned short, unsigned short,
- unsigned short, unsigned short, unsigned short,
- unsigned short *);
-
-static void find_2bit_err_pats(unsigned short, unsigned short, unsigned short,
- unsigned short, unsigned short *);
-static void find_2x2_soln(unsigned short, unsigned short, unsigned short,
- unsigned short, unsigned short, unsigned short,
- unsigned short *);
-static void solve_2x3(unsigned short[2][3], unsigned short *);
-static int chk_no_err_only(unsigned short *, unsigned short *);
-static int chk_1_err_only(unsigned short *, unsigned short *);
-static int chk_2_err_only(unsigned short *, unsigned short *);
-static int chk_3_err_only(unsigned short *, unsigned short *);
-static int chk_4_err_only(unsigned short *, unsigned short *);
-
-static unsigned short gf64_mul(unsigned short a, unsigned short b)
-{
- unsigned short tmp1, tmp2, tmp3, tmp4, tmp5;
- unsigned short c_bit0, c_bit1, c_bit2, c_bit3, c_bit4, c_bit5, c;
-
- tmp1 = ((a) ^ (a >> 5));
- tmp2 = ((a >> 4) ^ (a >> 5));
- tmp3 = ((a >> 3) ^ (a >> 4));
- tmp4 = ((a >> 2) ^ (a >> 3));
- tmp5 = ((a >> 1) ^ (a >> 2));
-
- c_bit0 = ((a & b) ^ ((a >> 5) & (b >> 1)) ^ ((a >> 4) & (b >> 2)) ^
- ((a >> 3) & (b >> 3)) ^ ((a >> 2) & (b >> 4)) ^ ((a >> 1) & (b >> 5))) & 0x1;
-
- c_bit1 = (((a >> 1) & b) ^ (tmp1 & (b >> 1)) ^ (tmp2 & (b >> 2)) ^
- (tmp3 & (b >> 3)) ^ (tmp4 & (b >> 4)) ^ (tmp5 & (b >> 5))) & 0x1;
-
- c_bit2 = (((a >> 2) & b) ^ ((a >> 1) & (b >> 1)) ^ (tmp1 & (b >> 2)) ^
- (tmp2 & (b >> 3)) ^ (tmp3 & (b >> 4)) ^ (tmp4 & (b >> 5))) & 0x1;
-
- c_bit3 = (((a >> 3) & b) ^ ((a >> 2) & (b >> 1)) ^ ((a >> 1) & (b >> 2)) ^
- (tmp1 & (b >> 3)) ^ (tmp2 & (b >> 4)) ^ (tmp3 & (b >> 5))) & 0x1;
-
- c_bit4 = (((a >> 4) & b) ^ ((a >> 3) & (b >> 1)) ^ ((a >> 2) & (b >> 2)) ^
- ((a >> 1) & (b >> 3)) ^ (tmp1 & (b >> 4)) ^ (tmp2 & (b >> 5))) & 0x1;
-
- c_bit5 = (((a >> 5) & b) ^ ((a >> 4) & (b >> 1)) ^ ((a >> 3) & (b >> 2)) ^
- ((a >> 2) & (b >> 3)) ^ ((a >> 1) & (b >> 4)) ^ (tmp1 & (b >> 5))) & 0x1;
-
- c = c_bit0 | (c_bit1 << 1) | (c_bit2 << 2) | (c_bit3 << 3) | (c_bit4 << 4) | (c_bit5 << 5);
-
- return c;
-}
-
-static unsigned short gf4096_mul(unsigned short a, unsigned short b)
-{
- unsigned short ah, al, bh, bl, alxah, blxbh, ablh, albl, ahbh, ahbhB, c;
-
- ah = (a >> 6) & 0x3f;
- al = a & 0x3f;
- bh = (b >> 6) & 0x3f;
- bl = b & 0x3f;
- alxah = al ^ ah;
- blxbh = bl ^ bh;
-
- ablh = gf64_mul(alxah, blxbh);
- albl = gf64_mul(al, bl);
- ahbh = gf64_mul(ah, bh);
-
- ahbhB = ((ahbh & 0x1) << 5) |
- ((ahbh & 0x20) >> 1) |
- ((ahbh & 0x10) >> 1) | ((ahbh & 0x8) >> 1) | ((ahbh & 0x4) >> 1) | (((ahbh >> 1) ^ ahbh) & 0x1);
-
- c = ((ablh ^ albl) << 6) | (ahbhB ^ albl);
- return c;
-}
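gf4096_mul() assembles a GF(2^12) product from three GF(2^6) products of the 6-bit half-words, a Karatsuba-style quadratic-extension construction that keeps all the real work in the small field. A hedged self-test sketch: if the construction is a field, 1 must act as the multiplicative identity:

	/* Hypothetical sanity check for the tower-field multiply above. */
	static int gf4096_identity_ok(void)
	{
		unsigned short x;

		for (x = 0; x < 0x1000; x++)
			if (gf4096_mul(x, 1) != x)	/* 1 is the identity of GF(4096) */
				return 0;
		return 1;
	}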
-
-static void find_2bit_err_pats(unsigned short s0, unsigned short s1, unsigned short r0, unsigned short r1, unsigned short *pats)
-{
- find_2x2_soln(0x1, 0x1, r0, r1, s0, s1, pats);
-}
-
-static void find_3bit_err_coefs(unsigned short s0, unsigned short s1,
- unsigned short s2, unsigned short s3, unsigned short s4, unsigned short s5, unsigned short *coefs)
-{
- unsigned short m[3][4];
- int row_order[3];
-
- row_order[0] = 0;
- row_order[1] = 1;
- row_order[2] = 2;
- m[0][0] = s2;
- m[0][1] = s1;
- m[0][2] = s0;
- m[0][3] = s3;
- m[1][0] = s3;
- m[1][1] = s2;
- m[1][2] = s1;
- m[1][3] = s4;
- m[2][0] = s4;
- m[2][1] = s3;
- m[2][2] = s2;
- m[2][3] = s5;
-
- if (m[0][2] != 0x0) {
- zero_3x4_col2(m);
- } else if (m[1][2] != 0x0) {
- swap_3x4_rows(m, 0, 1, 4);
- zero_3x4_col2(m);
- } else if (m[2][2] != 0x0) {
- swap_3x4_rows(m, 0, 2, 4);
- zero_3x4_col2(m);
- } else {
- printk(KERN_ERR "Error: find_3bit_err_coefs, s0,s1,s2 all zeros!\n");
- }
-
- if (m[1][1] != 0x0) {
- zero_3x4_col1(m);
- } else if (m[2][1] != 0x0) {
- swap_3x4_rows(m, 1, 2, 4);
- zero_3x4_col1(m);
- } else {
- printk(KERN_ERR "Error: find_3bit_err_coefs, cannot resolve col 1!\n");
- }
-
- /* solve coefs */
- solve_3x4(m, coefs, row_order);
-}
-
-static void zero_3x4_col2(unsigned short m[3][4])
-{
- unsigned short minv1, minv2;
-
- minv1 = gf4096_mul(m[1][2], gf4096_inv(m[0][2]));
- minv2 = gf4096_mul(m[2][2], gf4096_inv(m[0][2]));
- m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv1);
- m[1][1] = m[1][1] ^ gf4096_mul(m[0][1], minv1);
- m[1][3] = m[1][3] ^ gf4096_mul(m[0][3], minv1);
- m[2][0] = m[2][0] ^ gf4096_mul(m[0][0], minv2);
- m[2][1] = m[2][1] ^ gf4096_mul(m[0][1], minv2);
- m[2][3] = m[2][3] ^ gf4096_mul(m[0][3], minv2);
-}
-
-static void zero_3x4_col1(unsigned short m[3][4])
-{
- unsigned short minv;
- minv = gf4096_mul(m[2][1], gf4096_inv(m[1][1]));
- m[2][0] = m[2][0] ^ gf4096_mul(m[1][0], minv);
- m[2][3] = m[2][3] ^ gf4096_mul(m[1][3], minv);
-}
-
-static void swap_3x4_rows(unsigned short m[3][4], int i, int j, int col_width)
-{
- unsigned short tmp0;
- int cnt;
- for (cnt = 0; cnt < col_width; cnt++) {
- tmp0 = m[i][cnt];
- m[i][cnt] = m[j][cnt];
- m[j][cnt] = tmp0;
- }
-}
-
-static void solve_3x4(unsigned short m[3][4], unsigned short *coefs, int *row_order)
-{
- unsigned short tmp[3];
- tmp[0] = gf4096_mul(m[2][3], gf4096_inv(m[2][0]));
- tmp[1] = gf4096_mul((gf4096_mul(tmp[0], m[1][0]) ^ m[1][3]), gf4096_inv(m[1][1]));
- tmp[2] = gf4096_mul((gf4096_mul(tmp[0], m[0][0]) ^ gf4096_mul(tmp[1], m[0][1]) ^ m[0][3]), gf4096_inv(m[0][2]));
- sort_coefs(row_order, tmp, 3);
- coefs[0] = tmp[0];
- coefs[1] = tmp[1];
- coefs[2] = tmp[2];
-}
-
-static void find_3bit_err_pats(unsigned short s0, unsigned short s1,
- unsigned short s2, unsigned short r0,
- unsigned short r1, unsigned short r2,
- unsigned short *pats)
-{
- find_2x2_soln(r0 ^ r2, r1 ^ r2,
- gf4096_mul(r0, r0 ^ r2), gf4096_mul(r1, r1 ^ r2),
- gf4096_mul(s0, r2) ^ s1, gf4096_mul(s1, r2) ^ s2, pats);
- pats[2] = s0 ^ pats[0] ^ pats[1];
-}
-
-static void find_4bit_err_coefs(unsigned short s0, unsigned short s1,
- unsigned short s2, unsigned short s3,
- unsigned short s4, unsigned short s5,
- unsigned short s6, unsigned short s7,
- unsigned short *coefs)
-{
- unsigned short m[4][5];
- int row_order[4];
-
- row_order[0] = 0;
- row_order[1] = 1;
- row_order[2] = 2;
- row_order[3] = 3;
-
- m[0][0] = s3;
- m[0][1] = s2;
- m[0][2] = s1;
- m[0][3] = s0;
- m[0][4] = s4;
- m[1][0] = s4;
- m[1][1] = s3;
- m[1][2] = s2;
- m[1][3] = s1;
- m[1][4] = s5;
- m[2][0] = s5;
- m[2][1] = s4;
- m[2][2] = s3;
- m[2][3] = s2;
- m[2][4] = s6;
- m[3][0] = s6;
- m[3][1] = s5;
- m[3][2] = s4;
- m[3][3] = s3;
- m[3][4] = s7;
-
- if (m[0][3] != 0x0) {
- zero_4x5_col3(m);
- } else if (m[1][3] != 0x0) {
- swap_4x5_rows(m, 0, 1, 5);
- zero_4x5_col3(m);
- } else if (m[2][3] != 0x0) {
- swap_4x5_rows(m, 0, 2, 5);
- zero_4x5_col3(m);
- } else if (m[3][3] != 0x0) {
- swap_4x5_rows(m, 0, 3, 5);
- zero_4x5_col3(m);
- } else {
- printk(KERN_ERR "Error: find_4bit_err_coefs, s0,s1,s2,s3 all zeros!\n");
- }
-
- if (m[1][2] != 0x0) {
- zero_4x5_col2(m);
- } else if (m[2][2] != 0x0) {
- swap_4x5_rows(m, 1, 2, 5);
- zero_4x5_col2(m);
- } else if (m[3][2] != 0x0) {
- swap_4x5_rows(m, 1, 3, 5);
- zero_4x5_col2(m);
- } else {
- printk(KERN_ERR "Error: find_4bit_err_coefs, cannot resolve col 2!\n");
- }
-
- if (m[2][1] != 0x0) {
- zero_4x5_col1(m);
- } else if (m[3][1] != 0x0) {
- swap_4x5_rows(m, 2, 3, 5);
- zero_4x5_col1(m);
- } else {
- printk(KERN_ERR "Error: find_4bit_err_coefs, cannot resolve col 1!\n");
- }
-
- solve_4x5(m, coefs, row_order);
-}
-
-static void zero_4x5_col3(unsigned short m[4][5])
-{
- unsigned short minv1, minv2, minv3;
-
- minv1 = gf4096_mul(m[1][3], gf4096_inv(m[0][3]));
- minv2 = gf4096_mul(m[2][3], gf4096_inv(m[0][3]));
- minv3 = gf4096_mul(m[3][3], gf4096_inv(m[0][3]));
-
- m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv1);
- m[1][1] = m[1][1] ^ gf4096_mul(m[0][1], minv1);
- m[1][2] = m[1][2] ^ gf4096_mul(m[0][2], minv1);
- m[1][4] = m[1][4] ^ gf4096_mul(m[0][4], minv1);
- m[2][0] = m[2][0] ^ gf4096_mul(m[0][0], minv2);
- m[2][1] = m[2][1] ^ gf4096_mul(m[0][1], minv2);
- m[2][2] = m[2][2] ^ gf4096_mul(m[0][2], minv2);
- m[2][4] = m[2][4] ^ gf4096_mul(m[0][4], minv2);
- m[3][0] = m[3][0] ^ gf4096_mul(m[0][0], minv3);
- m[3][1] = m[3][1] ^ gf4096_mul(m[0][1], minv3);
- m[3][2] = m[3][2] ^ gf4096_mul(m[0][2], minv3);
- m[3][4] = m[3][4] ^ gf4096_mul(m[0][4], minv3);
-}
-
-static void zero_4x5_col2(unsigned short m[4][5])
-{
- unsigned short minv2, minv3;
-
- minv2 = gf4096_mul(m[2][2], gf4096_inv(m[1][2]));
- minv3 = gf4096_mul(m[3][2], gf4096_inv(m[1][2]));
-
- m[2][0] = m[2][0] ^ gf4096_mul(m[1][0], minv2);
- m[2][1] = m[2][1] ^ gf4096_mul(m[1][1], minv2);
- m[2][4] = m[2][4] ^ gf4096_mul(m[1][4], minv2);
- m[3][0] = m[3][0] ^ gf4096_mul(m[1][0], minv3);
- m[3][1] = m[3][1] ^ gf4096_mul(m[1][1], minv3);
- m[3][4] = m[3][4] ^ gf4096_mul(m[1][4], minv3);
-}
-
-static void zero_4x5_col1(unsigned short m[4][5])
-{
- unsigned short minv;
-
- minv = gf4096_mul(m[3][1], gf4096_inv(m[2][1]));
-
- m[3][0] = m[3][0] ^ gf4096_mul(m[2][0], minv);
- m[3][4] = m[3][4] ^ gf4096_mul(m[2][4], minv);
-}
-
-static void swap_4x5_rows(unsigned short m[4][5], int i, int j, int col_width)
-{
- unsigned short tmp0;
- int cnt;
-
- for (cnt = 0; cnt < col_width; cnt++) {
- tmp0 = m[i][cnt];
- m[i][cnt] = m[j][cnt];
- m[j][cnt] = tmp0;
- }
-}
-
-static void solve_4x5(unsigned short m[4][5], unsigned short *coefs, int *row_order)
-{
- unsigned short tmp[4];
-
- tmp[0] = gf4096_mul(m[3][4], gf4096_inv(m[3][0]));
- tmp[1] = gf4096_mul((gf4096_mul(tmp[0], m[2][0]) ^ m[2][4]), gf4096_inv(m[2][1]));
- tmp[2] = gf4096_mul((gf4096_mul(tmp[0], m[1][0]) ^ gf4096_mul(tmp[1], m[1][1]) ^ m[1][4]), gf4096_inv(m[1][2]));
- tmp[3] = gf4096_mul((gf4096_mul(tmp[0], m[0][0]) ^
- gf4096_mul(tmp[1], m[0][1]) ^ gf4096_mul(tmp[2], m[0][2]) ^ m[0][4]), gf4096_inv(m[0][3]));
- sort_coefs(row_order, tmp, 4);
- coefs[0] = tmp[0];
- coefs[1] = tmp[1];
- coefs[2] = tmp[2];
- coefs[3] = tmp[3];
-}
-
-static void sort_coefs(int *order, unsigned short *soln, int len)
-{
- int cnt, start_cnt, least_ord, least_cnt;
- unsigned short tmp0;
- for (start_cnt = 0; start_cnt < len; start_cnt++) {
- for (cnt = start_cnt; cnt < len; cnt++) {
- if (cnt == start_cnt) {
- least_ord = order[cnt];
- least_cnt = start_cnt;
- } else {
- if (least_ord > order[cnt]) {
- least_ord = order[cnt];
- least_cnt = cnt;
- }
- }
- }
- if (least_cnt != start_cnt) {
- tmp0 = order[least_cnt];
- order[least_cnt] = order[start_cnt];
- order[start_cnt] = tmp0;
- tmp0 = soln[least_cnt];
- soln[least_cnt] = soln[start_cnt];
- soln[start_cnt] = tmp0;
- }
- }
-}
-
-static void find_4bit_err_pats(unsigned short s0, unsigned short s1,
- unsigned short s2, unsigned short s3,
- unsigned short z1, unsigned short z2,
- unsigned short z3, unsigned short z4,
- unsigned short *pats)
-{
- unsigned short z4_z1, z3z4_z3z3, z4_z2, s0z4_s1, z1z4_z1z1,
- z4_z3, z2z4_z2z2, s1z4_s2, z3z3z4_z3z3z3, z1z1z4_z1z1z1, z2z2z4_z2z2z2, s2z4_s3;
- unsigned short tmp0, tmp1, tmp2, tmp3;
-
- z4_z1 = z4 ^ z1;
- z3z4_z3z3 = gf4096_mul(z3, z4) ^ gf4096_mul(z3, z3);
- z4_z2 = z4 ^ z2;
- s0z4_s1 = gf4096_mul(s0, z4) ^ s1;
- z1z4_z1z1 = gf4096_mul(z1, z4) ^ gf4096_mul(z1, z1);
- z4_z3 = z4 ^ z3;
- z2z4_z2z2 = gf4096_mul(z2, z4) ^ gf4096_mul(z2, z2);
- s1z4_s2 = gf4096_mul(s1, z4) ^ s2;
- z3z3z4_z3z3z3 = gf4096_mul(gf4096_mul(z3, z3), z4) ^ gf4096_mul(gf4096_mul(z3, z3), z3);
- z1z1z4_z1z1z1 = gf4096_mul(gf4096_mul(z1, z1), z4) ^ gf4096_mul(gf4096_mul(z1, z1), z1);
- z2z2z4_z2z2z2 = gf4096_mul(gf4096_mul(z2, z2), z4) ^ gf4096_mul(gf4096_mul(z2, z2), z2);
- s2z4_s3 = gf4096_mul(s2, z4) ^ s3;
-
- /* find error patterns 0 and 1 */
- find_2x2_soln(gf4096_mul(z4_z1, z3z4_z3z3) ^ gf4096_mul(z1z4_z1z1, z4_z3),
- gf4096_mul(z4_z2, z3z4_z3z3) ^ gf4096_mul(z2z4_z2z2, z4_z3),
- gf4096_mul(z1z4_z1z1, z3z3z4_z3z3z3) ^ gf4096_mul(z1z1z4_z1z1z1, z3z4_z3z3),
- gf4096_mul(z2z4_z2z2, z3z3z4_z3z3z3) ^ gf4096_mul(z2z2z4_z2z2z2, z3z4_z3z3),
- gf4096_mul(s0z4_s1, z3z4_z3z3) ^ gf4096_mul(s1z4_s2, z4_z3),
- gf4096_mul(s1z4_s2, z3z3z4_z3z3z3) ^ gf4096_mul(s2z4_s3, z3z4_z3z3), pats);
- tmp0 = pats[0];
- tmp1 = pats[1];
- tmp2 = pats[0] ^ pats[1] ^ s0;
- tmp3 = gf4096_mul(pats[0], z1) ^ gf4096_mul(pats[1], z2) ^ s1;
-
- /* find error patterns 2 and 3 */
- find_2x2_soln(0x1, 0x1, z3, z4, tmp2, tmp3, pats);
- pats[2] = pats[0];
- pats[3] = pats[1];
- pats[0] = tmp0;
- pats[1] = tmp1;
-}
-
-static void find_2x2_soln(unsigned short c00, unsigned short c01,
- unsigned short c10, unsigned short c11,
- unsigned short lval0, unsigned short lval1,
- unsigned short *soln)
-{
- unsigned short m[2][3];
- m[0][0] = c00;
- m[0][1] = c01;
- m[0][2] = lval0;
- m[1][0] = c10;
- m[1][1] = c11;
- m[1][2] = lval1;
-
- if (m[0][1] != 0x0) {
- /* pivot already in place */
- } else if (m[1][1] != 0x0) {
- swap_2x3_rows(m);
- } else {
- printk(KERN_ERR "Error: find_2x2_soln, cannot resolve col 1!\n");
- }
-
- solve_2x3(m, soln);
-}
-
-static void swap_2x3_rows(unsigned short m[2][3])
-{
- unsigned short tmp0;
- int cnt;
-
- for (cnt = 0; cnt < 3; cnt++) {
- tmp0 = m[0][cnt];
- m[0][cnt] = m[1][cnt];
- m[1][cnt] = tmp0;
- }
-}
-
-static void solve_2x3(unsigned short m[2][3], unsigned short *coefs)
-{
- unsigned short minv;
-
- minv = gf4096_mul(m[1][1], gf4096_inv(m[0][1]));
- m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv);
- m[1][2] = m[1][2] ^ gf4096_mul(m[0][2], minv);
- coefs[0] = gf4096_mul(m[1][2], gf4096_inv(m[1][0]));
- coefs[1] = gf4096_mul((gf4096_mul(coefs[0], m[0][0]) ^ m[0][2]), gf4096_inv(m[0][1]));
-}
-
-static unsigned char gf64_inv[64] = {
- 0, 1, 33, 62, 49, 43, 31, 44, 57, 37, 52, 28, 46, 40, 22, 25,
- 61, 54, 51, 39, 26, 35, 14, 24, 23, 15, 20, 34, 11, 53, 45, 6,
- 63, 2, 27, 21, 56, 9, 50, 19, 13, 47, 48, 5, 7, 30, 12, 41,
- 42, 4, 38, 18, 10, 29, 17, 60, 36, 8, 59, 58, 55, 16, 3, 32
-};
-
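-/*
- * Inversion via the GF(64) subfield: writing din = ah*t + al with
- * t^2 = t + A^-1, the norm is deno = al*(ah ^ al) ^ ah^2*A^-1, which is
- * inverted in GF(64) through the gf64_inv[] table; ah2B appears to be a
- * precomputed bit-linear map computing ah^2*A^-1. The inverse is then
- * (inv*ah)*t + inv*(ah ^ al).
- */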
-static unsigned short gf4096_inv(unsigned short din)
-{
- unsigned short alahxal, ah2B, deno, inv, bl, bh;
- unsigned short ah, al, ahxal;
- unsigned short dout;
-
- ah = (din >> 6) & 0x3f;
- al = din & 0x3f;
- ahxal = ah ^ al;
- ah2B = (((ah ^ (ah >> 3)) & 0x1) << 5) |
- ((ah >> 1) & 0x10) |
- ((((ah >> 5) ^ (ah >> 2)) & 0x1) << 3) |
- ((ah >> 2) & 0x4) | ((((ah >> 4) ^ (ah >> 1)) & 0x1) << 1) | (ah & 0x1);
- alahxal = gf64_mul(ahxal, al);
- deno = alahxal ^ ah2B;
- inv = gf64_inv[deno];
- bl = gf64_mul(inv, ahxal);
- bh = gf64_mul(inv, ah);
- dout = ((bh & 0x3f) << 6) | (bl & 0x3f);
- return dout;
-}
-
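-/*
- * err_pos_lut[] appears to be a discrete-log table: it maps a GF(4096)
- * element to its error position inside the shortened codeword
- * (err_pos_lut[1] = 0, valid entries run 0x000..0x55e), while 0xfff
- * marks elements that do not correspond to a correctable position.
- */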
-static unsigned short err_pos_lut[4096] = {
- 0xfff, 0x000, 0x451, 0xfff, 0xfff, 0x3cf, 0xfff, 0x041,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x28a, 0xfff, 0x492, 0xfff,
- 0x145, 0xfff, 0xfff, 0x514, 0xfff, 0x082, 0xfff, 0xfff,
- 0xfff, 0x249, 0x38e, 0x410, 0xfff, 0x104, 0x208, 0x1c7,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x2cb, 0xfff, 0xfff, 0xfff,
- 0x0c3, 0x34d, 0x4d3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x186, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x30c, 0x555, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x166, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x385, 0x14e, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e1,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x538, 0xfff, 0x16d, 0xfff,
- 0xfff, 0xfff, 0x45b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x29c, 0x2cc, 0x30b, 0x2b3, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0b3, 0xfff, 0x2f7,
- 0xfff, 0x32b, 0xfff, 0xfff, 0xfff, 0xfff, 0x0a7, 0xfff,
- 0xfff, 0x2da, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x07e, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x11c, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x22f, 0xfff, 0x1f4, 0xfff, 0xfff,
- 0x2b0, 0x504, 0xfff, 0x114, 0xfff, 0xfff, 0xfff, 0x21d,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x00d, 0x3c4, 0x340, 0x10f,
- 0xfff, 0xfff, 0x266, 0x02e, 0xfff, 0xfff, 0xfff, 0x4f8,
- 0x337, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x07b, 0x168, 0xfff, 0xfff, 0x0fe,
- 0xfff, 0xfff, 0x51a, 0xfff, 0x458, 0xfff, 0x36d, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x073, 0x37d, 0x415, 0x550, 0xfff,
- 0xfff, 0xfff, 0x23b, 0x4b4, 0xfff, 0xfff, 0xfff, 0x1a1,
- 0xfff, 0xfff, 0x3aa, 0xfff, 0x117, 0x04d, 0x341, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x518, 0x03e, 0x0f2, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x363, 0xfff, 0x0b9, 0xfff, 0xfff,
- 0x241, 0xfff, 0xfff, 0x049, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x15f, 0x52d, 0xfff, 0xfff, 0xfff, 0x29e, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x4cf, 0x0fc, 0xfff, 0x36f, 0x3d3, 0xfff,
- 0x228, 0xfff, 0xfff, 0x45e, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x238, 0xfff, 0xfff, 0xfff, 0xfff, 0x47f, 0xfff, 0xfff,
- 0x43a, 0x265, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3e8,
- 0xfff, 0xfff, 0x01a, 0xfff, 0xfff, 0xfff, 0xfff, 0x21e,
- 0x1fc, 0x40b, 0xfff, 0xfff, 0xfff, 0x2d0, 0x159, 0xfff,
- 0xfff, 0x313, 0xfff, 0xfff, 0x05c, 0x4cc, 0xfff, 0xfff,
- 0x0f6, 0x3d5, 0xfff, 0xfff, 0xfff, 0x54f, 0xfff, 0xfff,
- 0xfff, 0x172, 0x1e4, 0x07c, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x53c, 0x1ad, 0x535,
- 0x19b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x092, 0xfff, 0x2be, 0xfff, 0xfff, 0x482,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0e6, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x476, 0xfff, 0x51d, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x342, 0x2b5, 0x22e, 0x09a, 0xfff, 0x08d,
- 0x44f, 0x3ed, 0xfff, 0xfff, 0xfff, 0xfff, 0x3d1, 0xfff,
- 0xfff, 0x543, 0xfff, 0x48f, 0xfff, 0x3d2, 0xfff, 0x0d5,
- 0x113, 0x0ec, 0x427, 0xfff, 0xfff, 0xfff, 0x4c4, 0xfff,
- 0xfff, 0x50a, 0xfff, 0x144, 0xfff, 0x105, 0x39f, 0x294,
- 0x164, 0xfff, 0x31a, 0xfff, 0xfff, 0x49a, 0xfff, 0x130,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x1be, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x49e, 0x371, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x0e8, 0x49c, 0x0f4, 0xfff,
- 0x338, 0x1a7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x36c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x1ae, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x31b, 0xfff, 0xfff, 0x2dd, 0x522, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2f4,
- 0x3c6, 0x30d, 0xfff, 0xfff, 0xfff, 0xfff, 0x34c, 0x18f,
- 0x30a, 0xfff, 0x01f, 0x079, 0xfff, 0xfff, 0x54d, 0x46b,
- 0x28c, 0x37f, 0xfff, 0xfff, 0xfff, 0xfff, 0x355, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x14f, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x359, 0x3fe, 0x3c5, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x423, 0xfff, 0xfff, 0x34a, 0x22c, 0xfff,
- 0x25a, 0xfff, 0xfff, 0x4ad, 0xfff, 0x28d, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x547, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x2e2, 0xfff, 0xfff, 0x1d5, 0xfff, 0x2a8, 0xfff, 0xfff,
- 0x03f, 0xfff, 0xfff, 0xfff, 0xfff, 0x3eb, 0x0fa, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x55b, 0xfff,
- 0x08e, 0xfff, 0x3ae, 0xfff, 0x3a4, 0xfff, 0x282, 0x158,
- 0xfff, 0x382, 0xfff, 0xfff, 0x499, 0xfff, 0xfff, 0x08a,
- 0xfff, 0xfff, 0xfff, 0x456, 0x3be, 0xfff, 0x1e2, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x559, 0xfff, 0x1a0, 0xfff,
- 0xfff, 0x0b4, 0xfff, 0xfff, 0xfff, 0x2df, 0xfff, 0xfff,
- 0xfff, 0x07f, 0x4f5, 0xfff, 0xfff, 0x27c, 0x133, 0x017,
- 0xfff, 0x3fd, 0xfff, 0xfff, 0xfff, 0x44d, 0x4cd, 0x17a,
- 0x0d7, 0x537, 0xfff, 0xfff, 0x353, 0xfff, 0xfff, 0x351,
- 0x366, 0xfff, 0x44a, 0xfff, 0x1a6, 0xfff, 0xfff, 0xfff,
- 0x291, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1e3,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x389, 0xfff, 0x07a, 0xfff,
- 0x1b6, 0x2ed, 0xfff, 0xfff, 0xfff, 0xfff, 0x24e, 0x074,
- 0xfff, 0xfff, 0x3dc, 0xfff, 0x4e3, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x4eb, 0xfff, 0xfff, 0x3b8, 0x4de, 0xfff, 0x19c,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x262,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x076, 0x4e8, 0x3da,
- 0xfff, 0x531, 0xfff, 0xfff, 0x14a, 0xfff, 0x0a2, 0x433,
- 0x3df, 0x1e9, 0xfff, 0xfff, 0xfff, 0xfff, 0x3e7, 0x285,
- 0x2d8, 0xfff, 0xfff, 0xfff, 0x349, 0x18d, 0x098, 0xfff,
- 0x0df, 0x4bf, 0xfff, 0xfff, 0x0b2, 0xfff, 0x346, 0x24d,
- 0xfff, 0xfff, 0xfff, 0x24f, 0x4fa, 0x2f9, 0xfff, 0xfff,
- 0x3c9, 0xfff, 0x2b4, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x056, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x179, 0xfff, 0x0e9, 0x3f0, 0x33d, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x1fd, 0xfff, 0xfff, 0x526, 0xfff,
- 0xfff, 0xfff, 0x53d, 0xfff, 0xfff, 0xfff, 0x170, 0x331,
- 0xfff, 0x068, 0xfff, 0xfff, 0xfff, 0x3f7, 0xfff, 0x3d8,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x09f, 0x556, 0xfff, 0xfff, 0x02d, 0xfff, 0xfff,
- 0x553, 0xfff, 0xfff, 0xfff, 0x1f0, 0xfff, 0xfff, 0x4d6,
- 0x41e, 0xfff, 0xfff, 0xfff, 0xfff, 0x4d5, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x248, 0xfff, 0xfff, 0xfff, 0x0a3,
- 0xfff, 0x217, 0xfff, 0xfff, 0xfff, 0x4f1, 0x209, 0xfff,
- 0xfff, 0x475, 0x234, 0x52b, 0x398, 0xfff, 0x08b, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x2c2, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x268, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x4a3, 0xfff, 0x0aa, 0xfff, 0x1d9, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x155, 0xfff, 0xfff, 0xfff, 0xfff, 0x0bf,
- 0x539, 0xfff, 0xfff, 0x2f1, 0x545, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x2a7, 0x06f, 0xfff, 0x378, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x25e, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x15d, 0x02a, 0xfff, 0xfff, 0x0bc,
- 0x235, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x150, 0xfff, 0x1a9, 0xfff, 0xfff, 0xfff, 0xfff, 0x381,
- 0xfff, 0x04e, 0x270, 0x13f, 0xfff, 0xfff, 0x405, 0xfff,
- 0x3cd, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x2ef, 0xfff, 0x06a, 0xfff, 0xfff, 0xfff, 0x34f,
- 0x212, 0xfff, 0xfff, 0x0e2, 0xfff, 0x083, 0x298, 0xfff,
- 0xfff, 0xfff, 0x0c2, 0xfff, 0xfff, 0x52e, 0xfff, 0x488,
- 0xfff, 0xfff, 0xfff, 0x36b, 0xfff, 0xfff, 0xfff, 0x442,
- 0x091, 0xfff, 0x41c, 0xfff, 0xfff, 0x3a5, 0xfff, 0x4e6,
- 0xfff, 0xfff, 0x40d, 0x31d, 0xfff, 0xfff, 0xfff, 0x4c1,
- 0x053, 0xfff, 0x418, 0x13c, 0xfff, 0x350, 0xfff, 0x0ae,
- 0xfff, 0xfff, 0x41f, 0xfff, 0x470, 0xfff, 0x4ca, 0xfff,
- 0xfff, 0xfff, 0x02b, 0x450, 0xfff, 0x1f8, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x293, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x411, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x0b8, 0xfff, 0xfff, 0xfff,
- 0x3e1, 0xfff, 0xfff, 0xfff, 0xfff, 0x43c, 0xfff, 0x2b2,
- 0x2ab, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ec,
- 0xfff, 0xfff, 0xfff, 0x3f8, 0x034, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x11a, 0xfff, 0x541, 0x45c, 0x134,
- 0x1cc, 0xfff, 0xfff, 0xfff, 0x469, 0xfff, 0xfff, 0x44b,
- 0x161, 0xfff, 0xfff, 0xfff, 0x055, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x307, 0xfff, 0xfff, 0xfff, 0xfff, 0x2d1, 0xfff,
- 0xfff, 0xfff, 0x124, 0x37b, 0x26b, 0x336, 0xfff, 0xfff,
- 0x2e4, 0x3cb, 0xfff, 0xfff, 0x0f8, 0x3c8, 0xfff, 0xfff,
- 0xfff, 0x461, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4b5,
- 0x2cf, 0xfff, 0xfff, 0xfff, 0x20f, 0xfff, 0x35a, 0xfff,
- 0x490, 0xfff, 0x185, 0xfff, 0xfff, 0xfff, 0xfff, 0x42e,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x54b, 0xfff, 0xfff, 0xfff,
- 0x146, 0xfff, 0x412, 0xfff, 0xfff, 0xfff, 0x1ff, 0xfff,
- 0xfff, 0x3e0, 0xfff, 0xfff, 0xfff, 0xfff, 0x2d5, 0xfff,
- 0x4df, 0x505, 0xfff, 0x413, 0xfff, 0x1a5, 0xfff, 0x3b2,
- 0xfff, 0xfff, 0xfff, 0x35b, 0xfff, 0x116, 0xfff, 0xfff,
- 0x171, 0x4d0, 0xfff, 0x154, 0x12d, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x468, 0x4db, 0xfff,
- 0xfff, 0x1df, 0xfff, 0xfff, 0xfff, 0xfff, 0x05a, 0xfff,
- 0x0f1, 0x403, 0xfff, 0x22b, 0x2e0, 0xfff, 0xfff, 0xfff,
- 0x2b7, 0x373, 0xfff, 0xfff, 0xfff, 0xfff, 0x13e, 0xfff,
- 0xfff, 0xfff, 0x0d0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x329, 0x1d2, 0x3fa, 0x047, 0xfff, 0x2f2, 0xfff, 0xfff,
- 0x141, 0x0ac, 0x1d7, 0xfff, 0x07d, 0xfff, 0xfff, 0xfff,
- 0x1c1, 0xfff, 0x487, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x045, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x288, 0x0cd, 0xfff, 0xfff, 0xfff, 0xfff, 0x226, 0x1d8,
- 0xfff, 0x153, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4cb,
- 0x528, 0xfff, 0xfff, 0xfff, 0x20a, 0x343, 0x3a1, 0xfff,
- 0xfff, 0xfff, 0x2d7, 0x2d3, 0x1aa, 0x4c5, 0xfff, 0xfff,
- 0xfff, 0x42b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x3e9, 0xfff, 0x20b, 0x260,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x37c, 0x2fd,
- 0xfff, 0xfff, 0x2c8, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x31e, 0xfff, 0x335, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x135, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x35c, 0x4dd, 0x129, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x1ef, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x34e, 0xfff, 0xfff, 0xfff, 0xfff, 0x407, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x3ad, 0xfff, 0xfff, 0xfff,
- 0x379, 0xfff, 0xfff, 0x1d0, 0x38d, 0xfff, 0xfff, 0x1e8,
- 0x184, 0x3c1, 0x1c4, 0xfff, 0x1f9, 0xfff, 0xfff, 0x424,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x1d3, 0x0d4, 0xfff, 0x4e9,
- 0xfff, 0xfff, 0xfff, 0x530, 0x107, 0xfff, 0x106, 0x04f,
- 0xfff, 0xfff, 0x4c7, 0x503, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x15c, 0xfff, 0x23f, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x4f3, 0xfff, 0xfff, 0x3c7,
- 0xfff, 0x278, 0xfff, 0xfff, 0x0a6, 0xfff, 0xfff, 0xfff,
- 0x122, 0x1cf, 0xfff, 0x327, 0xfff, 0x2e5, 0xfff, 0x29d,
- 0xfff, 0xfff, 0x3f1, 0xfff, 0xfff, 0x48d, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x054, 0xfff, 0xfff, 0xfff, 0xfff, 0x178,
- 0x27e, 0x4e0, 0x352, 0x02f, 0x09c, 0xfff, 0x2a0, 0xfff,
- 0xfff, 0x46a, 0x457, 0xfff, 0xfff, 0x501, 0xfff, 0x2ba,
- 0xfff, 0xfff, 0xfff, 0x54e, 0x2e7, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x551, 0xfff, 0xfff, 0x1db, 0x2aa, 0xfff,
- 0xfff, 0x4bc, 0xfff, 0xfff, 0x395, 0xfff, 0x0de, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x455, 0xfff, 0x17e,
- 0xfff, 0x221, 0x4a7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x388, 0xfff, 0xfff, 0xfff, 0x308, 0xfff, 0xfff, 0xfff,
- 0x20e, 0x4b9, 0xfff, 0x273, 0x20c, 0x09e, 0xfff, 0x057,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x3f2, 0xfff, 0x1a8, 0x3a6,
- 0x14c, 0xfff, 0xfff, 0x071, 0xfff, 0xfff, 0x53a, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x109, 0xfff, 0xfff, 0x399, 0xfff,
- 0x061, 0x4f0, 0x39e, 0x244, 0xfff, 0x035, 0xfff, 0xfff,
- 0x305, 0x47e, 0x297, 0xfff, 0xfff, 0x2b8, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1bc, 0xfff, 0x2fc,
- 0xfff, 0xfff, 0x554, 0xfff, 0xfff, 0xfff, 0xfff, 0x3b6,
- 0xfff, 0xfff, 0xfff, 0x515, 0x397, 0xfff, 0xfff, 0x12f,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e5,
- 0xfff, 0x4fc, 0xfff, 0xfff, 0x05e, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x0a8, 0x3af, 0x015, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x138, 0xfff, 0xfff, 0xfff, 0x540, 0xfff, 0xfff,
- 0xfff, 0x027, 0x523, 0x2f0, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x16c, 0xfff, 0x27d, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x04c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4dc,
- 0xfff, 0xfff, 0x059, 0x301, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x1a3, 0xfff, 0x15a, 0xfff, 0xfff,
- 0x0a5, 0xfff, 0x435, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x051, 0xfff, 0xfff, 0x131, 0xfff, 0x4f4, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x441, 0xfff, 0x4fb, 0xfff, 0x03b,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ed, 0x274,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d3, 0x55e, 0x1b3,
- 0xfff, 0x0bd, 0xfff, 0xfff, 0xfff, 0xfff, 0x225, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x4b7, 0xfff, 0xfff, 0x2ff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4c3, 0xfff,
- 0x383, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2f6,
- 0xfff, 0xfff, 0x1ee, 0xfff, 0x03d, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x26f, 0x1dc, 0xfff, 0x0db, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x0ce, 0xfff, 0xfff, 0x127, 0x03a,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x311, 0xfff,
- 0xfff, 0x13d, 0x09d, 0x47b, 0x2a6, 0x50d, 0x510, 0x19a,
- 0xfff, 0x354, 0x414, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x44c, 0x3b0, 0xfff, 0x23d, 0x429, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x4c0, 0x416, 0xfff, 0x05b, 0xfff, 0xfff, 0x137, 0xfff,
- 0x25f, 0x49f, 0xfff, 0x279, 0x013, 0xfff, 0xfff, 0xfff,
- 0x269, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3d0, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x077, 0xfff, 0xfff, 0x3fb,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x271, 0x3a0, 0xfff, 0xfff,
- 0x40f, 0xfff, 0xfff, 0x3de, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ab, 0x26a,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x489, 0xfff, 0xfff,
- 0x252, 0xfff, 0xfff, 0xfff, 0xfff, 0x1b7, 0x42f, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3b7,
- 0xfff, 0x2bb, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x0f7, 0x01d, 0xfff, 0x067, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x4e2, 0xfff, 0xfff, 0x4bb, 0xfff,
- 0xfff, 0xfff, 0x17b, 0xfff, 0x0ee, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x36e, 0xfff, 0xfff, 0xfff, 0x533, 0xfff,
- 0xfff, 0xfff, 0x4d4, 0x356, 0xfff, 0xfff, 0x375, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x4a4, 0x513, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4ff, 0xfff, 0x2af,
- 0xfff, 0xfff, 0x026, 0xfff, 0x0ad, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x26e, 0xfff, 0xfff, 0xfff, 0xfff, 0x493, 0xfff,
- 0x463, 0x4d2, 0x4be, 0xfff, 0xfff, 0xfff, 0xfff, 0x4f2,
- 0x0b6, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x32d, 0x315, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x13a, 0x4a1, 0xfff, 0x27a, 0xfff, 0xfff, 0xfff,
- 0x47a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x334, 0xfff, 0xfff, 0xfff, 0xfff, 0x54c, 0xfff, 0xfff,
- 0xfff, 0x0c9, 0x007, 0xfff, 0xfff, 0x12e, 0xfff, 0x0ff,
- 0xfff, 0xfff, 0x3f5, 0x509, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x1c3, 0x2ad, 0xfff, 0xfff, 0x47c, 0x261, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x152, 0xfff, 0xfff, 0xfff, 0x339,
- 0xfff, 0x243, 0x1c0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x063, 0xfff, 0xfff, 0x254, 0xfff, 0xfff, 0x173, 0xfff,
- 0x0c7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x362, 0x259, 0x485, 0x374, 0x0dc, 0x3ab, 0xfff,
- 0x1c5, 0x534, 0x544, 0xfff, 0xfff, 0x508, 0xfff, 0x402,
- 0x408, 0xfff, 0x0e7, 0xfff, 0xfff, 0x00a, 0x205, 0xfff,
- 0xfff, 0x2b9, 0xfff, 0xfff, 0xfff, 0x465, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x23a, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x147, 0x19d, 0x115, 0x214, 0xfff, 0x090, 0x368,
- 0xfff, 0x210, 0xfff, 0xfff, 0x280, 0x52a, 0x163, 0x148,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x326, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x2de, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x206, 0x2c1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x189, 0xfff, 0xfff, 0xfff, 0xfff, 0x367, 0xfff, 0x1a4,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x443, 0xfff, 0x27b,
- 0xfff, 0xfff, 0x251, 0x549, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x188, 0x04b, 0xfff, 0xfff, 0xfff, 0x31f,
- 0x4a6, 0xfff, 0x246, 0x1de, 0x156, 0xfff, 0xfff, 0xfff,
- 0x3a9, 0xfff, 0xfff, 0xfff, 0x2fa, 0xfff, 0x128, 0x0d1,
- 0x449, 0x255, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x258, 0xfff, 0xfff, 0xfff,
- 0x532, 0xfff, 0xfff, 0xfff, 0x303, 0x517, 0xfff, 0xfff,
- 0x2a9, 0x24a, 0xfff, 0xfff, 0x231, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x4b6, 0x516, 0xfff, 0xfff, 0x0e4, 0x0eb,
- 0xfff, 0x4e4, 0xfff, 0x275, 0xfff, 0xfff, 0x031, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x025, 0x21a, 0xfff, 0x0cc,
- 0x45f, 0x3d9, 0x289, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x23e, 0xfff, 0xfff, 0xfff, 0x438, 0x097,
- 0x419, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x0a9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x37e, 0x0e0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x431,
- 0x372, 0xfff, 0xfff, 0xfff, 0x1ba, 0x06e, 0xfff, 0x1b1,
- 0xfff, 0xfff, 0x12a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x193, 0xfff, 0xfff, 0xfff, 0xfff, 0x10a,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x048, 0x1b4,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x295, 0x140, 0x108, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x16f, 0xfff, 0x0a4, 0x37a, 0xfff,
- 0x29a, 0xfff, 0x284, 0xfff, 0xfff, 0xfff, 0xfff, 0x4c6,
- 0x2a2, 0x3a3, 0xfff, 0x201, 0xfff, 0xfff, 0xfff, 0x4bd,
- 0x005, 0x54a, 0x3b5, 0x204, 0x2ee, 0x11d, 0x436, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x3ec, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x11f, 0x498, 0x21c, 0xfff,
- 0xfff, 0xfff, 0x3d6, 0xfff, 0x4ab, 0xfff, 0x432, 0x2eb,
- 0x542, 0x4fd, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x4ce, 0xfff, 0xfff, 0x2fb, 0xfff,
- 0xfff, 0x2e1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1b9, 0x037, 0x0dd,
- 0xfff, 0xfff, 0xfff, 0x2bf, 0x521, 0x496, 0x095, 0xfff,
- 0xfff, 0x328, 0x070, 0x1bf, 0xfff, 0x393, 0xfff, 0xfff,
- 0x102, 0xfff, 0xfff, 0x21b, 0xfff, 0x142, 0x263, 0x519,
- 0xfff, 0x2a5, 0x177, 0xfff, 0x14d, 0x471, 0x4ae, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x1f6, 0xfff, 0x481, 0xfff, 0xfff, 0xfff, 0x151, 0xfff,
- 0xfff, 0xfff, 0x085, 0x33f, 0xfff, 0xfff, 0xfff, 0x084,
- 0xfff, 0xfff, 0xfff, 0x345, 0x3a2, 0xfff, 0xfff, 0x0a0,
- 0x0da, 0x024, 0xfff, 0xfff, 0xfff, 0x1bd, 0xfff, 0x55c,
- 0x467, 0x445, 0xfff, 0xfff, 0xfff, 0x052, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x51e, 0xfff, 0xfff, 0x39d, 0xfff, 0x35f,
- 0xfff, 0x376, 0x3ee, 0xfff, 0xfff, 0xfff, 0xfff, 0x448,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x16a,
- 0xfff, 0x036, 0x38f, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x211,
- 0xfff, 0xfff, 0xfff, 0x230, 0xfff, 0xfff, 0x3ba, 0xfff,
- 0xfff, 0xfff, 0x3ce, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x229, 0xfff, 0x176, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x00b, 0xfff, 0x162, 0x018, 0xfff,
- 0xfff, 0x233, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x400, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x12b, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x3f4, 0xfff, 0x0f0, 0xfff, 0x1ac, 0xfff, 0xfff,
- 0x119, 0xfff, 0x2c0, 0xfff, 0xfff, 0xfff, 0x49b, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x23c, 0xfff,
- 0x4b3, 0x010, 0x064, 0xfff, 0xfff, 0x4ba, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x3c2, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x006, 0x196, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x100, 0x191, 0xfff,
- 0x1ea, 0x29f, 0xfff, 0xfff, 0xfff, 0x276, 0xfff, 0xfff,
- 0x2b1, 0x3b9, 0xfff, 0x03c, 0xfff, 0xfff, 0xfff, 0x180,
- 0xfff, 0x08f, 0xfff, 0xfff, 0x19e, 0x019, 0xfff, 0x0b0,
- 0x0fd, 0x332, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x06b, 0x2e8, 0xfff, 0x446, 0xfff, 0xfff, 0x004,
- 0x247, 0x197, 0xfff, 0x112, 0x169, 0x292, 0xfff, 0x302,
- 0xfff, 0xfff, 0x33b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x287, 0x21f, 0xfff, 0x3ea, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e7, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x3a8, 0xfff, 0xfff, 0x2bc, 0xfff,
- 0x484, 0x296, 0xfff, 0x1c9, 0x08c, 0x1e5, 0x48a, 0xfff,
- 0x360, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x1ca, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x10d, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x066, 0x2ea, 0x28b, 0x25b, 0xfff, 0x072,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x2b6, 0xfff, 0xfff, 0x272,
- 0xfff, 0xfff, 0x525, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x2ca, 0xfff, 0xfff, 0xfff, 0x299, 0xfff, 0xfff, 0xfff,
- 0x558, 0x41a, 0xfff, 0x4f7, 0x557, 0xfff, 0x4a0, 0x344,
- 0x12c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x125,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x40e, 0xfff, 0xfff, 0x502, 0xfff, 0x103, 0x3e6, 0xfff,
- 0x527, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x45d, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x44e, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d2, 0x4c9, 0x35e,
- 0x459, 0x2d9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x17d,
- 0x0c4, 0xfff, 0xfff, 0xfff, 0x3ac, 0x390, 0x094, 0xfff,
- 0x483, 0x0ab, 0xfff, 0x253, 0xfff, 0x391, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x123, 0x0ef, 0xfff, 0xfff, 0xfff, 0x330,
- 0x38c, 0xfff, 0xfff, 0x2ae, 0xfff, 0xfff, 0xfff, 0x042,
- 0x012, 0x06d, 0xfff, 0xfff, 0xfff, 0x32a, 0x3db, 0x364,
- 0x2dc, 0xfff, 0x30f, 0x3d7, 0x4a5, 0x050, 0xfff, 0xfff,
- 0x029, 0xfff, 0xfff, 0xfff, 0xfff, 0x1d1, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x480, 0xfff,
- 0x4ed, 0x081, 0x0a1, 0xfff, 0xfff, 0xfff, 0x30e, 0x52f,
- 0x257, 0xfff, 0xfff, 0x447, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x401, 0x3cc, 0xfff, 0xfff, 0x0fb,
- 0x2c9, 0x42a, 0x314, 0x33e, 0x3bd, 0x318, 0xfff, 0x10e,
- 0x2a1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x24c,
- 0x506, 0xfff, 0x267, 0xfff, 0xfff, 0x219, 0xfff, 0x1eb,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x309, 0x3e2, 0x46c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x384, 0xfff, 0xfff, 0xfff, 0xfff, 0x50c, 0xfff, 0x24b,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x038,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x194,
- 0x143, 0x3e3, 0xfff, 0xfff, 0xfff, 0x4c2, 0xfff, 0xfff,
- 0x0e1, 0x25c, 0xfff, 0x237, 0xfff, 0x1fe, 0xfff, 0xfff,
- 0xfff, 0x065, 0x2a4, 0xfff, 0x386, 0x55a, 0x11b, 0xfff,
- 0xfff, 0x192, 0xfff, 0x183, 0x00e, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x4b2, 0x18e, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x486, 0x4ef, 0x0c6, 0x380, 0xfff, 0x4a8, 0xfff,
- 0x0c5, 0xfff, 0xfff, 0xfff, 0xfff, 0x093, 0x1b8, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2e6,
- 0xfff, 0x0f3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x28e, 0xfff, 0x53b, 0x420, 0x22a, 0x33a, 0xfff, 0x387,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2a3, 0xfff, 0xfff,
- 0xfff, 0x428, 0x500, 0xfff, 0xfff, 0x120, 0x2c6, 0x290,
- 0x2f5, 0x0e3, 0xfff, 0x0b7, 0xfff, 0x319, 0x474, 0xfff,
- 0xfff, 0xfff, 0x529, 0x014, 0xfff, 0x41b, 0x40a, 0x18b,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d9,
- 0xfff, 0x38a, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ce, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x3b1, 0xfff, 0xfff, 0x05d,
- 0x2c4, 0xfff, 0xfff, 0x4af, 0xfff, 0x030, 0xfff, 0xfff,
- 0x203, 0xfff, 0x277, 0x256, 0xfff, 0xfff, 0xfff, 0x4f9,
- 0xfff, 0x2c7, 0xfff, 0x466, 0x016, 0x1cd, 0xfff, 0x167,
- 0xfff, 0xfff, 0x0c8, 0xfff, 0x43d, 0xfff, 0xfff, 0x020,
- 0xfff, 0xfff, 0x232, 0x1cb, 0x1e0, 0xfff, 0xfff, 0x347,
- 0xfff, 0x478, 0xfff, 0x365, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x358, 0xfff, 0x10b, 0xfff, 0x35d, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x452, 0x22d, 0xfff, 0xfff, 0x47d, 0xfff,
- 0x2f3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x460, 0xfff,
- 0xfff, 0xfff, 0x50b, 0xfff, 0xfff, 0xfff, 0x2ec, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x4b1, 0x422, 0xfff, 0xfff,
- 0xfff, 0x2d4, 0xfff, 0x239, 0xfff, 0xfff, 0xfff, 0x439,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x491, 0x075, 0xfff, 0xfff, 0xfff, 0x06c, 0xfff,
- 0xfff, 0x0f9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x139, 0xfff, 0x4f6, 0xfff, 0xfff, 0x409, 0xfff,
- 0xfff, 0x15b, 0xfff, 0xfff, 0x348, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x4a2, 0x49d, 0xfff, 0x033, 0x175, 0xfff, 0x039,
- 0xfff, 0x312, 0x40c, 0xfff, 0xfff, 0x325, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x4aa, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0x165, 0x3bc, 0x48c, 0x310, 0x096,
- 0xfff, 0xfff, 0x250, 0x1a2, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x20d, 0x2ac, 0xfff, 0xfff, 0x39b, 0xfff, 0x377, 0xfff,
- 0x512, 0x495, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x357, 0x4ea, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x198, 0xfff, 0xfff, 0xfff, 0x434, 0x04a,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x062, 0xfff, 0x1d6, 0x1c8,
- 0xfff, 0x1f3, 0x281, 0xfff, 0x462, 0xfff, 0xfff, 0xfff,
- 0x4b0, 0xfff, 0x207, 0xfff, 0xfff, 0xfff, 0xfff, 0x3dd,
- 0xfff, 0xfff, 0x55d, 0xfff, 0x552, 0x494, 0x1af, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x227, 0xfff, 0xfff, 0x069,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x43e,
- 0x0b5, 0xfff, 0x524, 0x2d2, 0xfff, 0xfff, 0xfff, 0x28f,
- 0xfff, 0x01b, 0x50e, 0xfff, 0xfff, 0x1bb, 0xfff, 0xfff,
- 0x41d, 0xfff, 0x32e, 0x48e, 0xfff, 0x1f7, 0x224, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x394, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x52c, 0xfff, 0xfff, 0xfff, 0x392, 0xfff, 0x1e7,
- 0xfff, 0xfff, 0x3f9, 0x3a7, 0xfff, 0x51f, 0xfff, 0x0bb,
- 0x118, 0x3ca, 0xfff, 0x1dd, 0xfff, 0x48b, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x50f, 0xfff, 0x0d6, 0xfff, 0x1fa, 0xfff,
- 0x11e, 0xfff, 0xfff, 0xfff, 0xfff, 0x4d7, 0xfff, 0x078,
- 0x008, 0xfff, 0x25d, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x032, 0x33c, 0xfff, 0x4d9, 0x160, 0xfff, 0xfff, 0x300,
- 0x0b1, 0xfff, 0x322, 0xfff, 0x4ec, 0xfff, 0xfff, 0x200,
- 0x00c, 0x369, 0x473, 0xfff, 0xfff, 0x32c, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x53e, 0x3d4, 0x417, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x34b, 0x001, 0x39a, 0x02c, 0xfff, 0xfff, 0x2ce, 0x00f,
- 0xfff, 0x0ba, 0xfff, 0xfff, 0xfff, 0xfff, 0x060, 0xfff,
- 0x406, 0xfff, 0xfff, 0xfff, 0x4ee, 0x4ac, 0xfff, 0x43f,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x29b, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x216,
- 0x190, 0xfff, 0x396, 0x464, 0xfff, 0xfff, 0x323, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2e9, 0xfff, 0x26d,
- 0x2cd, 0x040, 0xfff, 0xfff, 0xfff, 0xfff, 0x38b, 0x3c0,
- 0xfff, 0xfff, 0xfff, 0x1f2, 0xfff, 0x0ea, 0xfff, 0xfff,
- 0x472, 0xfff, 0x1fb, 0xfff, 0xfff, 0x0af, 0x27f, 0xfff,
- 0xfff, 0xfff, 0x479, 0x023, 0xfff, 0x0d8, 0x3b3, 0xfff,
- 0xfff, 0xfff, 0x121, 0xfff, 0xfff, 0x3bf, 0xfff, 0xfff,
- 0x16b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x45a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x0be, 0xfff, 0xfff, 0xfff, 0x111, 0xfff, 0x220,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x09b, 0x218, 0xfff, 0x022, 0x202, 0xfff,
- 0x4c8, 0xfff, 0x0ed, 0xfff, 0xfff, 0x182, 0xfff, 0xfff,
- 0xfff, 0x17f, 0x213, 0xfff, 0x321, 0x36a, 0xfff, 0x086,
- 0xfff, 0xfff, 0xfff, 0x43b, 0x088, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x26c, 0xfff, 0x2f8, 0x3b4, 0xfff, 0xfff, 0xfff,
- 0x132, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x333, 0x444,
- 0x0c1, 0x4d8, 0x46d, 0x264, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x426, 0xfff, 0xfff, 0xfff, 0xfff, 0x2fe, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x011, 0xfff, 0x05f, 0xfff, 0xfff, 0xfff,
- 0xfff, 0x10c, 0x101, 0xfff, 0xfff, 0xfff, 0xfff, 0x110,
- 0xfff, 0x044, 0x304, 0x361, 0x404, 0xfff, 0x51b, 0x099,
- 0xfff, 0x440, 0xfff, 0xfff, 0xfff, 0x222, 0xfff, 0xfff,
- 0xfff, 0xfff, 0x1b5, 0xfff, 0x136, 0x430, 0xfff, 0x1da,
- 0xfff, 0xfff, 0xfff, 0x043, 0xfff, 0x17c, 0xfff, 0xfff,
- 0xfff, 0x01c, 0xfff, 0xfff, 0xfff, 0x425, 0x236, 0xfff,
- 0x317, 0xfff, 0xfff, 0x437, 0x3fc, 0xfff, 0x1f1, 0xfff,
- 0x324, 0xfff, 0xfff, 0x0ca, 0x306, 0xfff, 0x548, 0xfff,
- 0x46e, 0xfff, 0xfff, 0xfff, 0x4b8, 0x1c2, 0x286, 0xfff,
- 0xfff, 0x087, 0x18a, 0x19f, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x18c, 0xfff, 0x215, 0xfff, 0xfff, 0xfff, 0xfff, 0x283,
- 0xfff, 0xfff, 0xfff, 0x126, 0xfff, 0xfff, 0x370, 0xfff,
- 0x53f, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x31c, 0xfff,
- 0x4d1, 0xfff, 0xfff, 0xfff, 0x021, 0xfff, 0x157, 0xfff,
- 0xfff, 0x028, 0x16e, 0xfff, 0x421, 0xfff, 0x1c6, 0xfff,
- 0xfff, 0x511, 0xfff, 0xfff, 0x39c, 0x46f, 0x1b2, 0xfff,
- 0xfff, 0x316, 0xfff, 0xfff, 0x009, 0xfff, 0xfff, 0x195,
- 0xfff, 0x240, 0x546, 0xfff, 0xfff, 0x520, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x454, 0xfff, 0xfff, 0xfff,
- 0x3f3, 0xfff, 0xfff, 0x187, 0xfff, 0x4a9, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x51c, 0x453, 0x1e6, 0xfff,
- 0xfff, 0xfff, 0x1b0, 0xfff, 0x477, 0xfff, 0xfff, 0xfff,
- 0x4fe, 0xfff, 0x32f, 0xfff, 0xfff, 0x15e, 0x1d4, 0xfff,
- 0x0e5, 0xfff, 0xfff, 0xfff, 0x242, 0x14b, 0x046, 0xfff,
- 0x3f6, 0x3bb, 0x3e4, 0xfff, 0xfff, 0x2e3, 0xfff, 0x245,
- 0xfff, 0x149, 0xfff, 0xfff, 0xfff, 0x2db, 0xfff, 0xfff,
- 0x181, 0xfff, 0x089, 0x2c5, 0xfff, 0x1f5, 0xfff, 0x2d6,
- 0x507, 0xfff, 0x42d, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0x080, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
- 0xfff, 0xfff, 0xfff, 0xfff, 0x3c3, 0x320, 0xfff, 0x1e1,
- 0xfff, 0x0f5, 0x13b, 0xfff, 0xfff, 0xfff, 0x003, 0x4da,
- 0xfff, 0xfff, 0xfff, 0x42c, 0xfff, 0xfff, 0x0cb, 0xfff,
- 0x536, 0x2c3, 0xfff, 0xfff, 0xfff, 0xfff, 0x199, 0xfff,
- 0xfff, 0x0c0, 0xfff, 0x01e, 0x497, 0xfff, 0xfff, 0x3e5,
- 0xfff, 0xfff, 0xfff, 0x0cf, 0xfff, 0x2bd, 0xfff, 0x223,
- 0xfff, 0x3ff, 0xfff, 0x058, 0x174, 0x3ef, 0xfff, 0x002
-};
-
-static unsigned short err_pos(unsigned short din)
-{
- BUG_ON(din >= ARRAY_SIZE(err_pos_lut));
- return err_pos_lut[din];
-}
-
-static int chk_no_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
-{
- if ((chk_syndrome_list[0] | chk_syndrome_list[1] |
- chk_syndrome_list[2] | chk_syndrome_list[3] |
- chk_syndrome_list[4] | chk_syndrome_list[5] |
- chk_syndrome_list[6] | chk_syndrome_list[7]) != 0x0) {
- return -EINVAL;
- } else {
- err_info[0] = 0x0;
- return 0;
- }
-}
-
-static int chk_1_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
-{
- unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
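- /*
-  * A single error of pattern Y at locator X yields syndromes
-  * S_k = Y * X^k, so the consecutive ratios S_{k+1}/S_k must all
-  * equal the same element X.
-  */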
- tmp0 = gf4096_mul(chk_syndrome_list[1], gf4096_inv(chk_syndrome_list[0]));
- tmp1 = gf4096_mul(chk_syndrome_list[2], gf4096_inv(chk_syndrome_list[1]));
- tmp2 = gf4096_mul(chk_syndrome_list[3], gf4096_inv(chk_syndrome_list[2]));
- tmp3 = gf4096_mul(chk_syndrome_list[4], gf4096_inv(chk_syndrome_list[3]));
- tmp4 = gf4096_mul(chk_syndrome_list[5], gf4096_inv(chk_syndrome_list[4]));
- tmp5 = gf4096_mul(chk_syndrome_list[6], gf4096_inv(chk_syndrome_list[5]));
- tmp6 = gf4096_mul(chk_syndrome_list[7], gf4096_inv(chk_syndrome_list[6]));
- if ((tmp0 == tmp1) && (tmp1 == tmp2) && (tmp2 == tmp3) &&
- (tmp3 == tmp4) && (tmp4 == tmp5) && (tmp5 == tmp6)) {
- err_info[0] = 0x1; // encode 1-symbol error as 0x1
- err_info[1] = (unsigned short)(0x55e - err_pos(tmp0));
- err_info[5] = chk_syndrome_list[0];
- return 0;
- } else
- return -EINVAL;
-}
-
-static int chk_2_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
-{
- unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- unsigned short coefs[4];
- unsigned short err_pats[4];
- int found_num_root = 0;
- unsigned short bit2_root0, bit2_root1;
- unsigned short bit2_root0_inv, bit2_root1_inv;
- unsigned short err_loc_eqn, test_root;
- unsigned short bit2_loc0, bit2_loc1;
- unsigned short bit2_pat0, bit2_pat1;
-
- find_2x2_soln(chk_syndrome_list[1], chk_syndrome_list[0],
- chk_syndrome_list[2], chk_syndrome_list[1],
- chk_syndrome_list[2], chk_syndrome_list[3], coefs);
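- /*
-  * Brute-force (Chien-style) root search: try each candidate field
-  * element x in 1..0xffe as a root of the error-locator polynomial
-  * coefs[1]*x^2 ^ coefs[0]*x ^ 1; the inverses of the roots are the
-  * error locators.
-  */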
- for (test_root = 0x1; test_root < 0xfff; test_root++) {
- err_loc_eqn =
- gf4096_mul(coefs[1], gf4096_mul(test_root, test_root)) ^ gf4096_mul(coefs[0], test_root) ^ 0x1;
- if (err_loc_eqn == 0x0) {
- if (found_num_root == 0) {
- bit2_root0 = test_root;
- found_num_root = 1;
- } else if (found_num_root == 1) {
- bit2_root1 = test_root;
- found_num_root = 2;
- break;
- }
- }
- }
- if (found_num_root != 2)
- return -EINVAL;
- else {
- bit2_root0_inv = gf4096_inv(bit2_root0);
- bit2_root1_inv = gf4096_inv(bit2_root1);
- find_2bit_err_pats(chk_syndrome_list[0],
- chk_syndrome_list[1], bit2_root0_inv, bit2_root1_inv, err_pats);
- bit2_pat0 = err_pats[0];
- bit2_pat1 = err_pats[1];
- /* powers of the two inverted roots, for checking syndromes S4..S7 */
- tmp0 = gf4096_mul(gf4096_mul(bit2_root0_inv, bit2_root0_inv), gf4096_mul(bit2_root0_inv, bit2_root0_inv)); /* rinv0^4 */
- tmp1 = gf4096_mul(bit2_root0_inv, tmp0); /* rinv0^5 */
- tmp2 = gf4096_mul(bit2_root0_inv, tmp1); /* rinv0^6 */
- tmp3 = gf4096_mul(bit2_root0_inv, tmp2); /* rinv0^7 */
- tmp4 = gf4096_mul(gf4096_mul(bit2_root1_inv, bit2_root1_inv), gf4096_mul(bit2_root1_inv, bit2_root1_inv)); /* rinv1^4 */
- tmp5 = gf4096_mul(bit2_root1_inv, tmp4); /* rinv1^5 */
- tmp6 = gf4096_mul(bit2_root1_inv, tmp5); /* rinv1^6 */
- tmp7 = gf4096_mul(bit2_root1_inv, tmp6); /* rinv1^7 */
- /* check that the remaining syndromes match exactly two symbol errors */
- if ((chk_syndrome_list[4] == (gf4096_mul(bit2_pat0, tmp0) ^ gf4096_mul(bit2_pat1, tmp4))) &&
- (chk_syndrome_list[5] == (gf4096_mul(bit2_pat0, tmp1) ^ gf4096_mul(bit2_pat1, tmp5))) &&
- (chk_syndrome_list[6] == (gf4096_mul(bit2_pat0, tmp2) ^ gf4096_mul(bit2_pat1, tmp6))) &&
- (chk_syndrome_list[7] == (gf4096_mul(bit2_pat0, tmp3) ^ gf4096_mul(bit2_pat1, tmp7)))) {
- if ((err_pos(bit2_root0_inv) == 0xfff) || (err_pos(bit2_root1_inv) == 0xfff)) {
- return -EINVAL;
- } else {
- bit2_loc0 = 0x55e - err_pos(bit2_root0_inv);
- bit2_loc1 = 0x55e - err_pos(bit2_root1_inv);
- err_info[0] = 0x2; // encode 2-symbol error as 0x2
- err_info[1] = bit2_loc0;
- err_info[2] = bit2_loc1;
- err_info[5] = bit2_pat0;
- err_info[6] = bit2_pat1;
- return 0;
- }
- } else
- return -EINVAL;
- }
-}
-
-static int chk_3_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
-{
- unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
- unsigned short coefs[4];
- unsigned short err_pats[4];
- int found_num_root = 0;
- unsigned short bit3_root0, bit3_root1, bit3_root2;
- unsigned short bit3_root0_inv, bit3_root1_inv, bit3_root2_inv;
- unsigned short err_loc_eqn, test_root;
-
- find_3bit_err_coefs(chk_syndrome_list[0], chk_syndrome_list[1],
- chk_syndrome_list[2], chk_syndrome_list[3],
- chk_syndrome_list[4], chk_syndrome_list[5], coefs);
-
- for (test_root = 0x1; test_root < 0xfff; test_root++) {
- err_loc_eqn = gf4096_mul(coefs[2], gf4096_mul(gf4096_mul(test_root, test_root), test_root)) ^
- gf4096_mul(coefs[1], gf4096_mul(test_root, test_root)) ^
- gf4096_mul(coefs[0], test_root) ^ 0x1;
-
- if (err_loc_eqn == 0x0) {
- if (found_num_root == 0) {
- bit3_root0 = test_root;
- found_num_root = 1;
- } else if (found_num_root == 1) {
- bit3_root1 = test_root;
- found_num_root = 2;
- } else if (found_num_root == 2) {
- bit3_root2 = test_root;
- found_num_root = 3;
- break;
- }
- }
- }
- if (found_num_root != 3)
- return -EINVAL;
- else {
- bit3_root0_inv = gf4096_inv(bit3_root0);
- bit3_root1_inv = gf4096_inv(bit3_root1);
- bit3_root2_inv = gf4096_inv(bit3_root2);
-
- find_3bit_err_pats(chk_syndrome_list[0], chk_syndrome_list[1],
- chk_syndrome_list[2], bit3_root0_inv,
- bit3_root1_inv, bit3_root2_inv, err_pats);
-
- /* powers of the inverted roots, for checking syndromes S6 and S7 */
- tmp0 = gf4096_mul(bit3_root0_inv, bit3_root0_inv);
- tmp0 = gf4096_mul(tmp0, tmp0);
- tmp0 = gf4096_mul(tmp0, bit3_root0_inv);
- tmp0 = gf4096_mul(tmp0, bit3_root0_inv); /* rinv0^6 */
- tmp1 = gf4096_mul(tmp0, bit3_root0_inv); /* rinv0^7 */
- tmp2 = gf4096_mul(bit3_root1_inv, bit3_root1_inv);
- tmp2 = gf4096_mul(tmp2, tmp2);
- tmp2 = gf4096_mul(tmp2, bit3_root1_inv);
- tmp2 = gf4096_mul(tmp2, bit3_root1_inv); /* rinv1^6 */
- tmp3 = gf4096_mul(tmp2, bit3_root1_inv); /* rinv1^7 */
- tmp4 = gf4096_mul(bit3_root2_inv, bit3_root2_inv);
- tmp4 = gf4096_mul(tmp4, tmp4);
- tmp4 = gf4096_mul(tmp4, bit3_root2_inv);
- tmp4 = gf4096_mul(tmp4, bit3_root2_inv); /* rinv2^6 */
- tmp5 = gf4096_mul(tmp4, bit3_root2_inv); /* rinv2^7 */
-
- /* check that the remaining syndromes match exactly three symbol errors */
- if ((chk_syndrome_list[6] == (gf4096_mul(err_pats[0], tmp0) ^
- gf4096_mul(err_pats[1], tmp2) ^
- gf4096_mul(err_pats[2], tmp4))) &&
- (chk_syndrome_list[7] == (gf4096_mul(err_pats[0], tmp1) ^
- gf4096_mul(err_pats[1], tmp3) ^ gf4096_mul(err_pats[2], tmp5)))) {
- if ((err_pos(bit3_root0_inv) == 0xfff) ||
- (err_pos(bit3_root1_inv) == 0xfff) || (err_pos(bit3_root2_inv) == 0xfff)) {
- return -EINVAL;
- } else {
- err_info[0] = 0x3;
- err_info[1] = (0x55e - err_pos(bit3_root0_inv));
- err_info[2] = (0x55e - err_pos(bit3_root1_inv));
- err_info[3] = (0x55e - err_pos(bit3_root2_inv));
- err_info[5] = err_pats[0];
- err_info[6] = err_pats[1];
- err_info[7] = err_pats[2];
- return 0;
- }
- } else
- return -EINVAL;
- }
-}
-
-static int chk_4_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
-{
- unsigned short coefs[4];
- unsigned short err_pats[4];
- int found_num_root = 0;
- unsigned short bit4_root0, bit4_root1, bit4_root2, bit4_root3;
- unsigned short bit4_root0_inv, bit4_root1_inv, bit4_root2_inv, bit4_root3_inv;
- unsigned short err_loc_eqn, test_root;
-
- find_4bit_err_coefs(chk_syndrome_list[0], chk_syndrome_list[1],
- chk_syndrome_list[2], chk_syndrome_list[3],
- chk_syndrome_list[4], chk_syndrome_list[5],
- chk_syndrome_list[6], chk_syndrome_list[7], coefs);
-
- for (test_root = 0x1; test_root < 0xfff; test_root++) {
- err_loc_eqn = gf4096_mul(coefs[3], gf4096_mul(gf4096_mul(gf4096_mul(test_root, test_root), test_root), test_root)) ^
- gf4096_mul(coefs[2], gf4096_mul(gf4096_mul(test_root, test_root), test_root)) ^
- gf4096_mul(coefs[1], gf4096_mul(test_root, test_root)) ^
- gf4096_mul(coefs[0], test_root) ^ 0x1;
- if (err_loc_eqn == 0x0) {
- if (found_num_root == 0) {
- bit4_root0 = test_root;
- found_num_root = 1;
- } else if (found_num_root == 1) {
- bit4_root1 = test_root;
- found_num_root = 2;
- } else if (found_num_root == 2) {
- bit4_root2 = test_root;
- found_num_root = 3;
- } else {
- found_num_root = 4;
- bit4_root3 = test_root;
- break;
- }
- }
- }
- if (found_num_root != 4) {
- return -EINVAL;
- } else {
- bit4_root0_inv = gf4096_inv(bit4_root0);
- bit4_root1_inv = gf4096_inv(bit4_root1);
- bit4_root2_inv = gf4096_inv(bit4_root2);
- bit4_root3_inv = gf4096_inv(bit4_root3);
- find_4bit_err_pats(chk_syndrome_list[0], chk_syndrome_list[1],
- chk_syndrome_list[2], chk_syndrome_list[3],
- bit4_root0_inv, bit4_root1_inv,
- bit4_root2_inv, bit4_root3_inv, err_pats);
- err_info[0] = 0x4;
- err_info[1] = (0x55e - err_pos(bit4_root0_inv));
- err_info[2] = (0x55e - err_pos(bit4_root1_inv));
- err_info[3] = (0x55e - err_pos(bit4_root2_inv));
- err_info[4] = (0x55e - err_pos(bit4_root3_inv));
- err_info[5] = err_pats[0];
- err_info[6] = err_pats[1];
- err_info[7] = err_pats[2];
- err_info[8] = err_pats[3];
- return 0;
- }
-}
-
-void correct_12bit_symbol(unsigned char *buf, unsigned short sym,
- unsigned short val)
-{
- if (unlikely(sym > 1366)) {
- printk(KERN_ERR "Error: symbol %d out of range; cannot correct\n", sym);
- } else if (sym == 0) {
- buf[0] ^= val;
- } else if (sym & 1) {
- buf[1+(3*(sym-1))/2] ^= (val >> 4);
- buf[2+(3*(sym-1))/2] ^= ((val & 0xf) << 4);
- } else {
- buf[2+(3*(sym-2))/2] ^= (val >> 8);
- buf[3+(3*(sym-2))/2] ^= (val & 0xff);
- }
-}
-
-static int debugecc = 0;
-module_param(debugecc, int, 0644);
-
-int cafe_correct_ecc(unsigned char *buf,
- unsigned short *chk_syndrome_list)
-{
- unsigned short err_info[9];
- int i;
-
- if (debugecc) {
- printk(KERN_WARNING "cafe_correct_ecc invoked. Syndromes %x %x %x %x %x %x %x %x\n",
- chk_syndrome_list[0], chk_syndrome_list[1],
- chk_syndrome_list[2], chk_syndrome_list[3],
- chk_syndrome_list[4], chk_syndrome_list[5],
- chk_syndrome_list[6], chk_syndrome_list[7]);
- for (i=0; i < 2048; i+=16) {
- printk(KERN_WARNING "D %04x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- i,
- buf[i], buf[i+1], buf[i+2], buf[i+3],
- buf[i+4], buf[i+5], buf[i+6], buf[i+7],
- buf[i+8], buf[i+9], buf[i+10], buf[i+11],
- buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
- }
- for ( ; i < 2112; i+=16) {
- printk(KERN_WARNING "O %02x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- i - 2048,
- buf[i], buf[i+1], buf[i+2], buf[i+3],
- buf[i+4], buf[i+5], buf[i+6], buf[i+7],
- buf[i+8], buf[i+9], buf[i+10], buf[i+11],
- buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
- }
- }
-
- if (chk_no_err_only(chk_syndrome_list, err_info) &&
- chk_1_err_only(chk_syndrome_list, err_info) &&
- chk_2_err_only(chk_syndrome_list, err_info) &&
- chk_3_err_only(chk_syndrome_list, err_info) &&
- chk_4_err_only(chk_syndrome_list, err_info)) {
- return -EIO;
- }
-
- for (i=0; i < err_info[0]; i++) {
- if (debugecc)
- printk(KERN_WARNING "Correct symbol %d with 0x%03x\n",
- err_info[1+i], err_info[5+i]);
-
- correct_12bit_symbol(buf, err_info[1+i], err_info[5+i]);
- }
-
- return err_info[0];
-}
-
diff --git a/drivers/mtd/nand/cafe.c b/drivers/mtd/nand/cafe_nand.c
index c328a751451..cff969d05d4 100644
--- a/drivers/mtd/nand/cafe.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -11,6 +11,7 @@
#undef DEBUG
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/rslib.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -46,13 +47,14 @@
#define CAFE_GLOBAL_IRQ_MASK 0x300c
#define CAFE_NAND_RESET 0x3034
-int cafe_correct_ecc(unsigned char *buf,
- unsigned short *chk_syndrome_list);
+/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
+#define CTRL1_CHIPSELECT (1<<19)
struct cafe_priv {
struct nand_chip nand;
struct pci_dev *pdev;
void __iomem *mmio;
+ struct rs_control *rs;
uint32_t ctl1;
uint32_t ctl2;
int datalen;
@@ -195,8 +197,8 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
cafe->data_pos = cafe->datalen = 0;
- /* Set command valid bit */
- ctl1 = 0x80000000 | command;
+ /* Set command valid bit, mask in the chip select bit */
+ ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
/* Set RD or WR bits as appropriate */
if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
@@ -309,8 +311,16 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
{
- //struct cafe_priv *cafe = mtd->priv;
- // cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+ struct cafe_priv *cafe = mtd->priv;
+
+ cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+
+ /* Mask the appropriate bit into the stored value of ctl1
+ which will be used by cafe_nand_cmdfunc() */
+ if (chipnr)
+ cafe->ctl1 |= CTRL1_CHIPSELECT;
+ else
+ cafe->ctl1 &= ~CTRL1_CHIPSELECT;
}
static int cafe_nand_interrupt(int irq, void *id)
@@ -374,28 +384,66 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
- unsigned short syn[8];
- int i;
+ unsigned short syn[8], pat[4];
+ int pos[4];
+ u8 *oob = chip->oob_poi;
+ int i, n;
for (i=0; i<8; i+=2) {
uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
- syn[i] = tmp & 0xfff;
- syn[i+1] = (tmp >> 16) & 0xfff;
+ syn[i] = cafe->rs->index_of[tmp & 0xfff];
+ syn[i+1] = cafe->rs->index_of[(tmp >> 16) & 0xfff];
+ }
+
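+ /* With data and par NULL, decode_rs16() works from the supplied
+ syndromes and reports corrections through pos[]/pat[] instead of
+ applying them; n is the number of symbol errors found, negative
+ when the data is uncorrectable (per the rslib conventions). */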
+ n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
+ pat);
+
+ for (i = 0; i < n; i++) {
+ int p = pos[i];
+
+ /* The 12-bit symbols are mapped to bytes here */
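+ /* A sketch of the packing: an odd symbol p spans buf[3*p/2]
+ (bits 11..4) and the high nibble of buf[3*p/2 + 1]; an even
+ symbol p spans the low nibble of buf[3*p/2 - 1] (bits 11..8)
+ and all of buf[3*p/2]. E.g. a correction at p = 5 touches
+ buf[7] and the top nibble of buf[8]. Symbol 0 is special:
+ only its low byte lies in the data area. */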
+
+ if (p > 1374) {
+ /* out of range */
+ n = -1374;
+ } else if (p == 0) {
+ /* high four bits do not correspond to data */
+ if (pat[i] > 0xff)
+ n = -2048;
+ else
+ buf[0] ^= pat[i];
+ } else if (p == 1365) {
+ buf[2047] ^= pat[i] >> 4;
+ oob[0] ^= pat[i] << 4;
+ } else if (p > 1365) {
+ if ((p & 1) == 1) {
+ oob[3*p/2 - 2048] ^= pat[i] >> 4;
+ oob[3*p/2 - 2047] ^= pat[i] << 4;
+ } else {
+ oob[3*p/2 - 2049] ^= pat[i] >> 8;
+ oob[3*p/2 - 2048] ^= pat[i];
+ }
+ } else if ((p & 1) == 1) {
+ buf[3*p/2] ^= pat[i] >> 4;
+ buf[3*p/2 + 1] ^= pat[i] << 4;
+ } else {
+ buf[3*p/2 - 1] ^= pat[i] >> 8;
+ buf[3*p/2] ^= pat[i];
+ }
}
- if ((i = cafe_correct_ecc(buf, syn)) < 0) {
+ if (n < 0) {
dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
cafe_readl(cafe, NAND_ADDR2) * 2048);
- for (i=0; i< 0x5c; i+=4)
+ for (i = 0; i < 0x5c; i += 4)
printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
mtd->ecc_stats.failed++;
} else {
- dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", i);
- mtd->ecc_stats.corrected += i;
+ dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
+ mtd->ecc_stats.corrected += n;
}
}
-
return 0;
}
@@ -416,7 +464,7 @@ static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 14,
.len = 4,
.veroffs = 18,
@@ -426,7 +474,7 @@ static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 14,
.len = 4,
.veroffs = 18,
@@ -442,7 +490,7 @@ static struct nand_ecclayout cafe_oobinfo_512 = {
static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 14,
.len = 1,
.veroffs = 15,
@@ -452,7 +500,7 @@ static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 14,
.len = 1,
.veroffs = 15,
@@ -525,6 +573,48 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
return 0;
}
+/* F_2[X]/(X**6+X+1) */
+static unsigned short __devinit gf64_mul(u8 a, u8 b)
+{
+ u8 c;
+ unsigned int i;
+
+ c = 0;
+ for (i = 0; i < 6; i++) {
+ if (a & 1)
+ c ^= b;
+ a >>= 1;
+ b <<= 1;
+ if ((b & 0x40) != 0)
+ b ^= 0x43;
+ }
+
+ return c;
+}
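+
+/*
+ * Worked example (a sketch): gf64_mul(0x20, 0x20) = 0x30, i.e.
+ * X^5 * X^5 = X^10 = X^5 + X^4 (mod X^6 + X + 1).
+ */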
+
+/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
+static u16 __devinit gf4096_mul(u16 a, u16 b)
+{
+ u8 ah, al, bh, bl, ch, cl;
+
+ ah = a >> 6;
+ al = a & 0x3f;
+ bh = b >> 6;
+ bl = b & 0x3f;
+
+ ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
+ cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
+
+ return (ch << 6) ^ cl;
+}
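+
+/*
+ * Karatsuba-style: with a = ah*t + al, b = bh*t + bl and t^2 = t + A^-1,
+ * the t-coefficient is (ah^al)*(bh^bl) ^ al*bl, which saves one subfield
+ * multiplication; 0x21 is presumably A^-1 in this GF(64) representation.
+ */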
+
+static int __devinit cafe_mul(int x)
+{
+ if (x == 0)
+ return 1;
+ return gf4096_mul(x, 0xe01);
+}
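+
+/*
+ * A note on the callback: init_rs_non_canonical() appears to iterate
+ * this function from 1 to build its exponent/log tables, so each call
+ * advances one step through the powers of the generator 0xe01 in the
+ * hardware's non-canonical bit ordering. (A reading of the rslib table
+ * setup, not something stated in this patch.)
+ */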
+
static int __devinit cafe_nand_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -564,6 +654,12 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
}
cafe->nand.buffers = (void *)cafe->dmabuf + 2112;
+ cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
+ if (!cafe->rs) {
+ err = -ENOMEM;
+ goto out_ior;
+ }
+
cafe->nand.cmdfunc = cafe_nand_cmdfunc;
cafe->nand.dev_ready = cafe_device_ready;
cafe->nand.read_byte = cafe_read_byte;
@@ -646,7 +742,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
/* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 2)) {
err = -ENXIO;
goto out_irq;
}
@@ -713,6 +809,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
nand_release(mtd);
+ free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(mtd);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 04de315e493..7e68203fe1b 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -303,28 +303,27 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
struct nand_chip *chip = mtd->priv;
u16 bad;
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
if (getchip) {
- page = (int)(ofs >> chip->page_shift);
chipnr = (int)(ofs >> chip->chip_shift);
nand_get_device(chip, mtd, FL_READING);
/* Select the NAND device */
chip->select_chip(mtd, chipnr);
- } else
- page = (int)(ofs >> chip->page_shift);
+ }
if (chip->options & NAND_BUSWIDTH_16) {
chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
- page & chip->pagemask);
+ page);
bad = cpu_to_le16(chip->read_word(mtd));
if (chip->badblockpos & 0x1)
bad >>= 8;
if ((bad & 0xFF) != 0xff)
res = 1;
} else {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
- page & chip->pagemask);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
if (chip->read_byte(mtd) != 0xff)
res = 1;
}
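
The nand_block_bad() change is a small refactor: the page number is now computed once, with the pagemask already applied, so neither bus-width branch needs to mask at its cmdfunc call site. The shift/mask arithmetic, worked through for a hypothetical geometry:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical chip: 2 KiB pages, 128 MiB per chip. */
        int page_shift = 11;                                    /* log2(2048) */
        int chip_shift = 27;                                    /* log2(128 MiB) */
        int pagemask = (1 << (chip_shift - page_shift)) - 1;    /* 0xffff */
        uint64_t ofs = 0x08100000;      /* an offset on the second chip */

        int page = (int)(ofs >> page_shift) & pagemask;
        int chipnr = (int)(ofs >> chip_shift);

        printf("page %#x on chip %d\n", page, chipnr);  /* page 0x200 on chip 1 */
        return 0;
}
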
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
new file mode 100644
index 00000000000..cd725fc5e81
--- /dev/null
+++ b/drivers/mtd/nand/plat_nand.c
@@ -0,0 +1,150 @@
+/*
+ * Generic NAND driver
+ *
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+struct plat_nand_data {
+ struct nand_chip chip;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+#ifdef CONFIG_MTD_PARTITIONS
+ int nr_parts;
+ struct mtd_partition *parts;
+#endif
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int __devinit plat_nand_probe(struct platform_device *pdev)
+{
+ struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct plat_nand_data *data;
+ int res = 0;
+
+ /* Allocate memory for the device structure (and zero it) */
+ data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ data->io_base = ioremap(pdev->resource[0].start,
+ pdev->resource[0].end - pdev->resource[0].start + 1);
+ if (data->io_base == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ kfree(data);
+ return -EIO;
+ }
+
+ data->chip.priv = data;
+ data->mtd.priv = &data->chip;
+ data->mtd.owner = THIS_MODULE;
+
+ data->chip.IO_ADDR_R = data->io_base;
+ data->chip.IO_ADDR_W = data->io_base;
+ data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
+ data->chip.dev_ready = pdata->ctrl.dev_ready;
+ data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.chip_delay = pdata->chip.chip_delay;
+ data->chip.options |= pdata->chip.options;
+
+ data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
+ data->chip.ecc.layout = pdata->chip.ecclayout;
+ data->chip.ecc.mode = NAND_ECC_SOFT;
+
+ platform_set_drvdata(pdev, data);
+
+ /* Scan to find existence of the device */
+ if (nand_scan(&data->mtd, 1)) {
+ res = -ENXIO;
+ goto out;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ if (pdata->chip.part_probe_types) {
+ res = parse_mtd_partitions(&data->mtd,
+ pdata->chip.part_probe_types,
+ &data->parts, 0);
+ if (res > 0) {
+ add_mtd_partitions(&data->mtd, data->parts, res);
+ return 0;
+ }
+ }
+ if (pdata->chip.partitions) {
+ data->parts = pdata->chip.partitions;
+ res = add_mtd_partitions(&data->mtd, data->parts,
+ pdata->chip.nr_partitions);
+ } else
+#endif
+ res = add_mtd_device(&data->mtd);
+
+ if (!res)
+ return res;
+
+ nand_release(&data->mtd);
+out:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(data->io_base);
+ kfree(data);
+ return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int __devexit plat_nand_remove(struct platform_device *pdev)
+{
+ struct plat_nand_data *data = platform_get_drvdata(pdev);
+ struct platform_nand_data *pdata = pdev->dev.platform_data;
+
+ nand_release(&data->mtd);
+#ifdef CONFIG_MTD_PARTITIONS
+ if (data->parts && data->parts != pdata->chip.partitions)
+ kfree(data->parts);
+#endif
+ iounmap(data->io_base);
+ kfree(data);
+
+ return 0;
+}
+
+static struct platform_driver plat_nand_driver = {
+ .probe = plat_nand_probe,
+ .remove = plat_nand_remove,
+ .driver = {
+ .name = "gen_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init plat_nand_init(void)
+{
+ return platform_driver_register(&plat_nand_driver);
+}
+
+static void __exit plat_nand_exit(void)
+{
+ platform_driver_unregister(&plat_nand_driver);
+}
+
+module_init(plat_nand_init);
+module_exit(plat_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_DESCRIPTION("Simple generic NAND driver");
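
plat_nand touches no hardware beyond the mapped window; everything board-specific arrives through platform_nand_data. A sketch of the board-code side, registering a "gen_nand" device (the base address, the CLE/ALE wiring, and the partition layout are all hypothetical; the field names follow the pdata->ctrl.* and pdata->chip.* accesses in the probe above):

#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>

#define BOARD_NAND_BASE 0x40000000      /* hypothetical chip-select window */

static void board_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
                                unsigned int ctrl)
{
        struct nand_chip *chip = mtd->priv;

        if (cmd == NAND_CMD_NONE)
                return;
        /* Hypothetical wiring: address line A1 drives CLE, A2 drives ALE. */
        if (ctrl & NAND_CLE)
                writeb(cmd, chip->IO_ADDR_W + 0x2);
        else
                writeb(cmd, chip->IO_ADDR_W + 0x4);
}

static struct mtd_partition board_nand_parts[] = {
        { .name = "boot",   .offset = 0, .size = 0x100000 },
        { .name = "rootfs", .offset = MTDPART_OFS_APPEND,
                            .size = MTDPART_SIZ_FULL },
};

static struct platform_nand_data board_nand_data = {
        .chip = {
                .nr_partitions  = ARRAY_SIZE(board_nand_parts),
                .partitions     = board_nand_parts,
                .chip_delay     = 20,
        },
        .ctrl = {
                .cmd_ctrl       = board_nand_cmd_ctrl,
        },
};

static struct resource board_nand_resource = {
        .start  = BOARD_NAND_BASE,
        .end    = BOARD_NAND_BASE + 0xfff,
        .flags  = IORESOURCE_MEM,
};

static struct platform_device board_nand_device = {
        .name           = "gen_nand",   /* must match the driver name above */
        .id             = -1,
        .dev            = { .platform_data = &board_nand_data },
        .num_resources  = 1,
        .resource       = &board_nand_resource,
};

Board init code would then call platform_device_register(&board_nand_device), and the soft-ECC NAND core does the rest.
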
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 000794c6caf..0537fac8de7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2192,7 +2192,7 @@ static int onenand_check_maf(int manuf)
* @param mtd MTD device structure
*
* OneNAND detection method:
- * Compare the the values from command with ones from register
+ * Compare the values from command with ones from register
*/
static int onenand_probe(struct mtd_info *mtd)
{
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9588da3a30e..127f60841b1 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -95,8 +95,7 @@ static int max_interrupt_work = 10;
#include <asm/io.h>
#include <asm/irq.h>
-static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
-static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n";
+static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
#if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA))
#define EL3_SUSPEND
@@ -360,7 +359,7 @@ static int __init el3_common_init(struct net_device *dev)
printk(", IRQ %d.\n", dev->irq);
if (el3_debug > 0)
- printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ printk(KERN_INFO "%s", version);
return 0;
}
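
The banner cleanups in this series (3c509, atp, epic100, eepro100) drop long-dead scyld.com URLs and fold each driver's two version strings into one. The construct being removed leaned on an accident: KERN_INFO is nothing but the literal prefix "<6>", so string concatenation placed a second level marker in the middle of one format string, and it behaved only because each banner ended in a newline and so started a fresh log line. A userspace illustration of the expansion:

#include <stdio.h>

#define KERN_INFO "<6>" /* the literal log-level prefix from <linux/kernel.h> */

int main(void)
{
        /* Hypothetical banner strings. */
        const char *versionA = "drv.c:v1.0 example\n";
        const char *versionB = "http://driver.example.invalid/\n";

        /* Old style: expands to "<6>%s<6>%s", two markers in one format. */
        printf(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);

        /* New style: one marker, one merged string. */
        printf(KERN_INFO "%s", versionA);
        return 0;
}
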
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80924f76dee..f26ca331615 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -103,7 +103,7 @@ static int vortex_debug = 1;
static char version[] __devinitdata =
-DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n";
+DRV_NAME ": Donald Becker and others.\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b86ccd2ecd5..fa489b10c38 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2493,12 +2493,28 @@ config PASEMI_MAC
This driver supports the on-chip 1/10Gbit Ethernet controller on
PA Semi's PWRficient line of chips.
+config MLX4_CORE
+ tristate
+ depends on PCI
+ default n
+
+config MLX4_DEBUG
+ bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+ default y
+ ---help---
+ This option causes debugging code to be compiled into the
+ mlx4_core driver. The output can be turned on via the
+ debug_level module parameter (which can also be set after
+ the driver is loaded through sysfs).
+
endmenu
source "drivers/net/tokenring/Kconfig"
source "drivers/net/wireless/Kconfig"
+source "drivers/net/usb/Kconfig"
+
source "drivers/net/pcmcia/Kconfig"
source "drivers/net/wan/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 59c0459a037..a77affa4f6e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -197,6 +197,7 @@ obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_FEC_8XX) += fec_8xx/
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
+obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MACB) += macb.o
@@ -206,6 +207,14 @@ obj-$(CONFIG_TR) += tokenring/
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_ARCNET) += arcnet/
obj-$(CONFIG_NET_PCMCIA) += pcmcia/
+
+obj-$(CONFIG_USB_CATC) += usb/
+obj-$(CONFIG_USB_KAWETH) += usb/
+obj-$(CONFIG_USB_PEGASUS) += usb/
+obj-$(CONFIG_USB_RTL8150) += usb/
+obj-$(CONFIG_USB_USBNET) += usb/
+obj-$(CONFIG_USB_ZD1201) += usb/
+
obj-y += wireless/
obj-$(CONFIG_NET_TULIP) += tulip/
obj-$(CONFIG_HAMRADIO) += hamradio/
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index d28f88bbdd5..78cf00ff3d3 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -2038,6 +2038,15 @@ static int atl1_close(struct net_device *netdev)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void atl1_poll_controller(struct net_device *netdev)
+{
+ disable_irq(netdev->irq);
+ atl1_intr(netdev->irq, netdev);
+ enable_irq(netdev->irq);
+}
+#endif
+
/*
* If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
* will assert. We do soft reset <0x1400=1> according
@@ -2190,6 +2199,9 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
netdev->do_ioctl = &atl1_ioctl;
netdev->tx_timeout = &atl1_tx_timeout;
netdev->watchdog_timeo = 5 * HZ;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = atl1_poll_controller;
+#endif
netdev->vlan_rx_register = atl1_vlan_rx_register;
netdev->vlan_rx_add_vid = atl1_vlan_rx_add_vid;
netdev->vlan_rx_kill_vid = atl1_vlan_rx_kill_vid;
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 18aba838c1f..82d78ff8399 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -31,10 +31,8 @@
*/
-static const char versionA[] =
+static const char version[] =
"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
-static const char versionB[] =
-" http://www.scyld.com/network/atp.html\n";
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -324,7 +322,7 @@ static int __init atp_probe1(long ioaddr)
#ifndef MODULE
if (net_debug)
- printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ printk(KERN_INFO "%s", version);
#endif
printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
@@ -926,7 +924,7 @@ static void set_rx_mode_8012(struct net_device *dev)
static int __init atp_init_module(void) {
if (debug) /* Emit version even if no cards detected. */
- printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ printk(KERN_INFO "%s", version);
return atp_init();
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 724bce51f93..223517dcbcf 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3461,7 +3461,7 @@ void bond_unregister_arp(struct bonding *bond)
/*---------------------------- Hashing Policies -----------------------------*/
/*
- * Hash for the the output device based upon layer 3 and layer 4 data. If
+ * Hash for the output device based upon layer 3 and layer 4 data. If
* the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
* altogether not IP, mimic bond_xmit_hash_policy_l2()
*/
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 3a03a74c060..637ae8f6879 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
int i;
#endif
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
e1000_release_manageability(adapter);
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 39654e1e2be..47680237f78 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1126,7 +1126,7 @@ static void eepro_tx_timeout (struct net_device *dev)
printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
"network cable problem");
/* This is not a duplicate. One message for the console,
- one for the the log file */
+ one for the log file */
printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
"network cable problem");
eepro_complete_selreset(ioaddr);
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 6c267c38df9..9800341956a 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -28,7 +28,7 @@
*/
static const char * const version =
-"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
+"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
/* A few user-configurable values that apply to all boards.
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 4e3f14c9c71..5e517946f46 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -93,8 +93,6 @@ static int rx_copybreak;
static char version[] __devinitdata =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
static char version2[] __devinitdata =
-" http://www.scyld.com/network/epic100.html\n";
-static char version3[] __devinitdata =
" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -323,8 +321,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
#ifndef MODULE
static int printed_version;
if (!printed_version++)
- printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
- version, version2, version3);
+ printk (KERN_INFO "%s" KERN_INFO "%s",
+ version, version2);
#endif
card_idx++;
@@ -1596,8 +1594,8 @@ static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
- printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
- version, version2, version3);
+ printk (KERN_INFO "%s" KERN_INFO "%s",
+ version, version2);
#endif
return pci_register_driver(&epic_driver);
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 6e90619b3b4..36d2c7d4f4d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -140,7 +140,7 @@ config BAYCOM_SER_HDX
modems that connect to a serial interface. The driver supports the
ser12 design in half-duplex mode. This is the old driver. It is
still provided in case your serial interface chip does not work with
- the full-duplex driver. This driver is depreciated. To configure
+ the full-duplex driver. This driver is deprecated. To configure
the driver, use the sethdlc utility available in the standard ax25
utilities package. For information on the modems, see
<http://www.baycom.de/> and
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 2ab173d9a0e..1e67720f106 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -113,7 +113,7 @@
/* RxOver overflow in Recv FIFO */
/* SipRcv received serial gap (or other condition you set) */
/* Interrupts are enabled by writing a one to the IER register */
-/* Interrupts are cleared by writting a one to the ISR register */
+/* Interrupts are cleared by writing a one to the ISR register */
/* */
/* 6. The remaining registers: 0x6 and 0x3 appear to be */
/* reserved parts of 16 or 32 bit registers; the remainder */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index f15aebde7b9..52c99d01d56 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -315,7 +315,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
* hw - Struct containing variables accessed by shared code
*
* Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
* valid.
*
* Returns:
diff --git a/drivers/net/meth.h b/drivers/net/meth.h
index 84960dae2a2..ea3b8fc86d1 100644
--- a/drivers/net/meth.h
+++ b/drivers/net/meth.h
@@ -126,7 +126,7 @@ typedef struct rx_packet {
/* Note: when loopback is set this bit becomes collision control. Setting this bit will */
/* cause a collision to be reported. */
- /* Bits 5 and 6 are used to determine the the Destination address filter mode */
+ /* Bits 5 and 6 are used to determine the Destination address filter mode */
#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */
#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */
#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
new file mode 100644
index 00000000000..0952a6528f5
--- /dev/null
+++ b/drivers/net/mlx4/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
+
+mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
+ mr.o pd.o profile.o qp.o reset.o srq.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
new file mode 100644
index 00000000000..9ffdb9d29da
--- /dev/null
+++ b/drivers/net/mlx4/alloc.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+
+#include "mlx4.h"
+
+u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
+{
+ u32 obj;
+
+ spin_lock(&bitmap->lock);
+
+ obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
+ if (obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+ obj = find_first_zero_bit(bitmap->table, bitmap->max);
+ }
+
+ if (obj < bitmap->max) {
+ set_bit(obj, bitmap->table);
+ obj |= bitmap->top;
+ bitmap->last = obj + 1;
+ } else
+ obj = -1;
+
+ spin_unlock(&bitmap->lock);
+
+ return obj;
+}
+
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
+{
+ obj &= bitmap->max - 1;
+
+ spin_lock(&bitmap->lock);
+ clear_bit(obj, bitmap->table);
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+ spin_unlock(&bitmap->lock);
+}
+
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
+{
+ int i;
+
+ /* num must be a power of 2 */
+ if (num != roundup_pow_of_two(num))
+ return -EINVAL;
+
+ bitmap->last = 0;
+ bitmap->top = 0;
+ bitmap->max = num;
+ bitmap->mask = mask;
+ spin_lock_init(&bitmap->lock);
+ bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
+ if (!bitmap->table)
+ return -ENOMEM;
+
+ for (i = 0; i < reserved; ++i)
+ set_bit(i, bitmap->table);
+
+ return 0;
+}
+
+void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
+{
+ kfree(bitmap->table);
+}
+
+/*
+ * Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+ struct mlx4_buf *buf)
+{
+ dma_addr_t t;
+
+ if (size <= max_direct) {
+ buf->nbufs = 1;
+ buf->npages = 1;
+ buf->page_shift = get_order(size) + PAGE_SHIFT;
+ buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->u.direct.buf)
+ return -ENOMEM;
+
+ buf->u.direct.map = t;
+
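+ /*
+ * If the DMA address is not aligned to the buffer size,
+ * describe the buffer as more, smaller pages until it is.
+ */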
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
+
+ memset(buf->u.direct.buf, 0, size);
+ } else {
+ int i;
+
+ buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->npages = buf->nbufs;
+ buf->page_shift = PAGE_SHIFT;
+ buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
+ GFP_KERNEL);
+ if (!buf->u.page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->nbufs; ++i) {
+ buf->u.page_list[i].buf =
+ dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &t, GFP_KERNEL);
+ if (!buf->u.page_list[i].buf)
+ goto err_free;
+
+ buf->u.page_list[i].map = t;
+
+ memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+ }
+ }
+
+ return 0;
+
+err_free:
+ mlx4_buf_free(dev, size, buf);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
+
+void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
+{
+ int i;
+
+ if (buf->nbufs == 1)
+ dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
+ buf->u.direct.map);
+ else {
+ for (i = 0; i < buf->nbufs; ++i)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ buf->u.page_list[i].buf,
+ buf->u.page_list[i].map);
+ kfree(buf->u.page_list);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_free);
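
mlx4_bitmap_alloc hands out numbers round-robin (scanning from ->last), and both the wrap in alloc and every free rotate ->top by max modulo mask. When a caller passes a mask wider than max - 1, the surviving high bits act as a generation counter: a recycled slot comes back under a different full-width number, so a stale handle cannot silently alias the new object. (For the CQ table later in this series the mask is exactly num_cqs - 1, so top stays zero there.) A userspace sketch of the rotation, locking omitted:

#include <stdio.h>

#define MAX     8       /* 8 slots; must be a power of 2 */
#define MASK    0x3f    /* wider than MAX - 1, so the high bits rotate */

static unsigned char table[MAX];
static unsigned int last, top;

static int bitmap_alloc(void)
{
        unsigned int obj;

        for (obj = last; obj < MAX && table[obj]; ++obj)
                ;
        if (obj >= MAX) {
                top = (top + MAX) & MASK;       /* wrapped: new generation */
                for (obj = 0; obj < MAX && table[obj]; ++obj)
                        ;
        }
        if (obj >= MAX)
                return -1;
        table[obj] = 1;
        last = obj + 1;
        return obj | top;
}

static void bitmap_free(unsigned int obj)
{
        obj &= MAX - 1;                         /* strip the generation bits */
        table[obj] = 0;
        if (obj < last)
                last = obj;
        top = (top + MAX) & MASK;
}

int main(void)
{
        int a = bitmap_alloc();
        bitmap_free(a);
        /* Same slot, different number: prints "0 8". */
        printf("%d %d\n", a, bitmap_alloc());
        return 0;
}
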
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
new file mode 100644
index 00000000000..1bb088aeaf7
--- /dev/null
+++ b/drivers/net/mlx4/catas.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx4.h"
+
+void mlx4_handle_catas_err(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ int i;
+
+ mlx4_err(dev, "Catastrophic error detected:\n");
+ for (i = 0; i < priv->fw.catas_size; ++i)
+ mlx4_err(dev, " buf[%02x]: %08x\n",
+ i, swab32(readl(priv->catas_err.map + i)));
+
+ mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+}
+
+void mlx4_map_catas_buf(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ unsigned long addr;
+
+ addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
+ priv->fw.catas_offset;
+
+ priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
+ if (!priv->catas_err.map)
+ mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
+ addr);
+}
+
+void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (priv->catas_err.map)
+ iounmap(priv->catas_err.map);
+}
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
new file mode 100644
index 00000000000..c1f81a993f5
--- /dev/null
+++ b/drivers/net/mlx4/cmd.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include <asm/io.h>
+
+#include "mlx4.h"
+
+#define CMD_POLL_TOKEN 0xffff
+
+enum {
+ /* command completed successfully: */
+ CMD_STAT_OK = 0x00,
+ /* Internal error (such as a bus error) occurred while processing command: */
+ CMD_STAT_INTERNAL_ERR = 0x01,
+ /* Operation/command not supported or opcode modifier not supported: */
+ CMD_STAT_BAD_OP = 0x02,
+ /* Parameter not supported or parameter out of range: */
+ CMD_STAT_BAD_PARAM = 0x03,
+ /* System not enabled or bad system state: */
+ CMD_STAT_BAD_SYS_STATE = 0x04,
+ /* Attempt to access reserved or unallocated resource: */
+ CMD_STAT_BAD_RESOURCE = 0x05,
+ /* Requested resource is currently executing a command, or is otherwise busy: */
+ CMD_STAT_RESOURCE_BUSY = 0x06,
+ /* Required capability exceeds device limits: */
+ CMD_STAT_EXCEED_LIM = 0x08,
+ /* Resource is not in the appropriate state or ownership: */
+ CMD_STAT_BAD_RES_STATE = 0x09,
+ /* Index out of range: */
+ CMD_STAT_BAD_INDEX = 0x0a,
+ /* FW image corrupted: */
+ CMD_STAT_BAD_NVMEM = 0x0b,
+ /* Attempt to modify a QP/EE which is not in the presumed state: */
+ CMD_STAT_BAD_QP_STATE = 0x10,
+ /* Bad segment parameters (Address/Size): */
+ CMD_STAT_BAD_SEG_PARAM = 0x20,
+ /* Memory Region has Memory Windows bound to: */
+ CMD_STAT_REG_BOUND = 0x21,
+ /* HCA local attached memory not present: */
+ CMD_STAT_LAM_NOT_PRE = 0x22,
+ /* Bad management packet (silently discarded): */
+ CMD_STAT_BAD_PKT = 0x30,
+ /* More outstanding CQEs in CQ than new CQ size: */
+ CMD_STAT_BAD_SIZE = 0x40
+};
+
+enum {
+ HCR_IN_PARAM_OFFSET = 0x00,
+ HCR_IN_MODIFIER_OFFSET = 0x08,
+ HCR_OUT_PARAM_OFFSET = 0x0c,
+ HCR_TOKEN_OFFSET = 0x14,
+ HCR_STATUS_OFFSET = 0x18,
+
+ HCR_OPMOD_SHIFT = 12,
+ HCR_T_BIT = 21,
+ HCR_E_BIT = 22,
+ HCR_GO_BIT = 23
+};
+
+enum {
+ GO_BIT_TIMEOUT = 10000
+};
+
+struct mlx4_cmd_context {
+ struct completion done;
+ int result;
+ int next;
+ u64 out_param;
+ u16 token;
+};
+
+static int mlx4_status_to_errno(u8 status)
+{
+ static const int trans_table[] = {
+ [CMD_STAT_INTERNAL_ERR] = -EIO,
+ [CMD_STAT_BAD_OP] = -EPERM,
+ [CMD_STAT_BAD_PARAM] = -EINVAL,
+ [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
+ [CMD_STAT_BAD_RESOURCE] = -EBADF,
+ [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
+ [CMD_STAT_EXCEED_LIM] = -ENOMEM,
+ [CMD_STAT_BAD_RES_STATE] = -EBADF,
+ [CMD_STAT_BAD_INDEX] = -EBADF,
+ [CMD_STAT_BAD_NVMEM] = -EFAULT,
+ [CMD_STAT_BAD_QP_STATE] = -EINVAL,
+ [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
+ [CMD_STAT_REG_BOUND] = -EBUSY,
+ [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
+ [CMD_STAT_BAD_PKT] = -EINVAL,
+ [CMD_STAT_BAD_SIZE] = -ENOMEM,
+ };
+
+ if (status >= ARRAY_SIZE(trans_table) ||
+ (status != CMD_STAT_OK && trans_table[status] == 0))
+ return -EIO;
+
+ return trans_table[status];
+}
+
+static int cmd_pending(struct mlx4_dev *dev)
+{
+ u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
+
+ return (status & swab32(1 << HCR_GO_BIT)) ||
+ (mlx4_priv(dev)->cmd.toggle ==
+ !!(status & swab32(1 << HCR_T_BIT)));
+}
+
+static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
+ u32 in_modifier, u8 op_modifier, u16 op, u16 token,
+ int event)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ u32 __iomem *hcr = cmd->hcr;
+ int ret = -EAGAIN;
+ unsigned long end;
+
+ mutex_lock(&cmd->hcr_mutex);
+
+ end = jiffies;
+ if (event)
+ end += HZ * 10;
+
+ while (cmd_pending(dev)) {
+ if (time_after_eq(jiffies, end))
+ goto out;
+ cond_resched();
+ }
+
+ /*
+ * We use writel (instead of something like memcpy_toio)
+ * because writes of less than 32 bits to the HCR don't work
+ * (and some architectures such as ia64 implement memcpy_toio
+ * in terms of writeb).
+ */
+ __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
+ __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
+ __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
+ __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
+ __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
+ __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
+
+ /* __raw_writel may not order writes. */
+ wmb();
+
+ __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
+ (cmd->toggle << HCR_T_BIT) |
+ (event ? (1 << HCR_E_BIT) : 0) |
+ (op_modifier << HCR_OPMOD_SHIFT) |
+ op), hcr + 6);
+ cmd->toggle = cmd->toggle ^ 1;
+
+ ret = 0;
+
+out:
+ mutex_unlock(&cmd->hcr_mutex);
+ return ret;
+}
+
+static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ void __iomem *hcr = priv->cmd.hcr;
+ int err = 0;
+ unsigned long end;
+
+ down(&priv->cmd.poll_sem);
+
+ err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+ in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
+ if (err)
+ goto out;
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (cmd_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+
+ if (cmd_pending(dev)) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (out_is_imm)
+ *out_param =
+ (u64) be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
+ (u64) be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
+
+ err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+
+out:
+ up(&priv->cmd.poll_sem);
+ return err;
+}
+
+void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_context *context =
+ &priv->cmd.context[token & priv->cmd.token_mask];
+
+ /* previously timed out command completing at long last */
+ if (token != context->token)
+ return;
+
+ context->result = mlx4_status_to_errno(status);
+ context->out_param = out_param;
+
+ context->token += priv->cmd.token_mask + 1;
+
+ complete(&context->done);
+}
+
+static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_context *context;
+ int err = 0;
+
+ down(&cmd->event_sem);
+
+ spin_lock(&cmd->context_lock);
+ BUG_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+ in_modifier, op_modifier, op, context->token, 1);
+
+ if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = context->result;
+ if (err)
+ goto out;
+
+ if (out_is_imm)
+ *out_param = context->out_param;
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ up(&cmd->event_sem);
+ return err;
+}
+
+int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
+ else
+ return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
+}
+EXPORT_SYMBOL_GPL(__mlx4_cmd);
+
+int mlx4_cmd_init(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_init(&priv->cmd.hcr_mutex);
+ sema_init(&priv->cmd.poll_sem, 1);
+ priv->cmd.use_events = 0;
+ priv->cmd.toggle = 1;
+
+ priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
+ MLX4_HCR_SIZE);
+ if (!priv->cmd.hcr) {
+ mlx4_err(dev, "Couldn't map command register.\n");
+ return -ENOMEM;
+ }
+
+ priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+ MLX4_MAILBOX_SIZE,
+ MLX4_MAILBOX_SIZE, 0);
+ if (!priv->cmd.pool) {
+ iounmap(priv->cmd.hcr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void mlx4_cmd_cleanup(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ pci_pool_destroy(priv->cmd.pool);
+ iounmap(priv->cmd.hcr);
+}
+
+/*
+ * Switch to using events to issue FW commands (can only be called
+ * after event queue for command events has been initialized).
+ */
+int mlx4_cmd_use_events(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ priv->cmd.context = kmalloc(priv->cmd.max_cmds *
+ sizeof (struct mlx4_cmd_context),
+ GFP_KERNEL);
+ if (!priv->cmd.context)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->cmd.max_cmds; ++i) {
+ priv->cmd.context[i].token = i;
+ priv->cmd.context[i].next = i + 1;
+ }
+
+ priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
+ priv->cmd.free_head = 0;
+
+ sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
+ spin_lock_init(&priv->cmd.context_lock);
+
+ for (priv->cmd.token_mask = 1;
+ priv->cmd.token_mask < priv->cmd.max_cmds;
+ priv->cmd.token_mask <<= 1)
+ ; /* nothing */
+ --priv->cmd.token_mask;
+
+ priv->cmd.use_events = 1;
+
+ down(&priv->cmd.poll_sem);
+
+ return 0;
+}
+
+/*
+ * Switch back to polling (used when shutting down the device)
+ */
+void mlx4_cmd_use_polling(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ priv->cmd.use_events = 0;
+
+ for (i = 0; i < priv->cmd.max_cmds; ++i)
+ down(&priv->cmd.event_sem);
+
+ kfree(priv->cmd.context);
+
+ up(&priv->cmd.poll_sem);
+}
+
+struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+
+ mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
+ if (!mailbox)
+ return ERR_PTR(-ENOMEM);
+
+ mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
+ &mailbox->dma);
+ if (!mailbox->buf) {
+ kfree(mailbox);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mailbox;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
+
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+{
+ if (!mailbox)
+ return;
+
+ pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
+ kfree(mailbox);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
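
For reference, the calling convention the rest of this series builds on: commands with a payload take a mailbox from the pci_pool and pass its DMA address as in_param, and __mlx4_cmd routes through event or polling mode depending on use_events. A sketch of a typical caller, modeled on the SW2HW_CQ wrapper later in the series (the zeroed payload is a placeholder, not a real command layout):

#include <linux/err.h>
#include <linux/string.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

static int example_sw2hw(struct mlx4_dev *dev, int object_num)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        /* Fill in the DMA-coherent buffer the firmware will read. */
        memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

        /* in_param is the mailbox DMA address, in_modifier the object
         * number; the mlx4_cmd() wrapper expands to __mlx4_cmd(). */
        err = mlx4_cmd(dev, mailbox->dma, object_num, 0,
                       MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
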
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
new file mode 100644
index 00000000000..437d78ad091
--- /dev/null
+++ b/drivers/net/mlx4/cq.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/hardirq.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_cq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ __be32 logsize_usrpage;
+ u8 reserved2;
+ u8 cq_period;
+ u8 reserved3;
+ u8 cq_max_count;
+ u8 reserved4[3];
+ u8 comp_eqn;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_index;
+ __be32 producer_index;
+ u8 reserved6[2];
+ __be64 db_rec_addr;
+};
+
+#define MLX4_CQ_STATUS_OK ( 0 << 28)
+#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
+#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
+#define MLX4_CQ_FLAG_CC ( 1 << 18)
+#define MLX4_CQ_FLAG_OI ( 1 << 17)
+#define MLX4_CQ_STATE_ARMED ( 9 << 8)
+#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8)
+#define MLX4_EQ_STATE_FIRED (10 << 8)
+
+void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
+{
+ struct mlx4_cq *cq;
+
+ cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
+ cqn & (dev->caps.num_cqs - 1));
+ if (!cq) {
+ mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+ return;
+ }
+
+ ++cq->arm_sn;
+
+ cq->comp(cq);
+}
+
+void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
+{
+ struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+ struct mlx4_cq *cq;
+
+ spin_lock(&cq_table->lock);
+
+ cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
+ if (cq)
+ atomic_inc(&cq->refcount);
+
+ spin_unlock(&cq_table->lock);
+
+ if (!cq) {
+ mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ return;
+ }
+
+ cq->event(cq, event_type);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
+
+static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int cq_num)
+{
+ return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int cq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
+ mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
+ struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_cq_context *cq_context;
+ u64 mtt_addr;
+ int err;
+
+ cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+ if (cq->cqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+ if (err)
+ goto err_put;
+
+ spin_lock_irq(&cq_table->lock);
+ err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
+ spin_unlock_irq(&cq_table->lock);
+ if (err)
+ goto err_cmpt_put;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_radix;
+ }
+
+ cq_context = mailbox->buf;
+ memset(cq_context, 0, sizeof *cq_context);
+
+ cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
+ cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
+ cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+
+ mtt_addr = mlx4_mtt_addr(dev, mtt);
+ cq_context->mtt_base_addr_h = mtt_addr >> 32;
+ cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+ cq_context->db_rec_addr = cpu_to_be64(db_rec);
+
+ err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err)
+ goto err_radix;
+
+ cq->cons_index = 0;
+ cq->arm_sn = 1;
+ cq->uar = uar;
+ atomic_set(&cq->refcount, 1);
+ init_completion(&cq->free);
+
+ return 0;
+
+err_radix:
+ spin_lock_irq(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock_irq(&cq_table->lock);
+
+err_cmpt_put:
+ mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
+
+err_put:
+ mlx4_table_put(dev, &cq_table->table, cq->cqn);
+
+err_out:
+ mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
+
+void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ int err;
+
+ err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
+ if (err)
+ mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+
+ synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq);
+
+ spin_lock_irq(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock_irq(&cq_table->lock);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+ wait_for_completion(&cq->free);
+
+ mlx4_table_put(dev, &cq_table->table, cq->cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_free);
+
+int __devinit mlx4_init_cq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+ int err;
+
+ spin_lock_init(&cq_table->lock);
+ INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+
+ err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
+ dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
+{
+ /* Nothing to do to clean up radix_tree */
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
+}
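
mlx4_cq_event() and mlx4_cq_free() together implement a small lifetime protocol: the event path pins the CQ with a reference taken under cq_table->lock before calling back into the owner, and teardown unlinks the CQ from the radix tree, drops the creator's reference, and sleeps on cq->free until every in-flight handler has dropped its own. The pattern restated in isolation (a generic sketch, not mlx4 API):

#include <linux/completion.h>
#include <asm/atomic.h>

struct obj {
        atomic_t refcount;              /* starts at 1: the creator's ref */
        struct completion free;
};

static void obj_put(struct obj *o)
{
        if (atomic_dec_and_test(&o->refcount))
                complete(&o->free);
}

/* Event path: pin, call back into the owner, unpin. */
static void obj_event(struct obj *o)
{
        atomic_inc(&o->refcount);       /* under the lookup lock in mlx4 */
        /* ... invoke the owner's callback ... */
        obj_put(o);
}

/* Teardown: unlink from lookup structures first, then drop the
 * creator's reference and wait out in-flight event handlers. */
static void obj_destroy(struct obj *o)
{
        obj_put(o);
        wait_for_completion(&o->free);
        /* now safe to free the backing resources */
}
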
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
new file mode 100644
index 00000000000..acf1c801a1b
--- /dev/null
+++ b/drivers/net/mlx4/eq.c
@@ -0,0 +1,696 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+enum {
+ MLX4_NUM_ASYNC_EQE = 0x100,
+ MLX4_NUM_SPARE_EQE = 0x80,
+ MLX4_EQ_ENTRY_SIZE = 0x20
+};
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ u8 log_eq_size;
+ u8 reserved2[4];
+ u8 eq_period;
+ u8 reserved3;
+ u8 eq_max_count;
+ u8 reserved4[3];
+ u8 intr;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ u32 reserved6[2];
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved7[4];
+};
+
+#define MLX4_EQ_STATUS_OK ( 0 << 28)
+#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
+#define MLX4_EQ_OWNER_SW ( 0 << 24)
+#define MLX4_EQ_OWNER_HW ( 1 << 24)
+#define MLX4_EQ_FLAG_EC ( 1 << 18)
+#define MLX4_EQ_FLAG_OI ( 1 << 17)
+#define MLX4_EQ_STATE_ARMED ( 9 << 8)
+#define MLX4_EQ_STATE_FIRED (10 << 8)
+#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
+
+#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \
+ (1ull << MLX4_EVENT_TYPE_COMM_EST) | \
+ (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \
+ (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
+ (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
+ (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
+ (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
+ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
+ (1ull << MLX4_EVENT_TYPE_CMD))
+#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
+
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __attribute__((packed)) comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __attribute__((packed)) cmd;
+ struct {
+ __be32 qpn;
+ } __attribute__((packed)) qp;
+ struct {
+ __be32 srqn;
+ } __attribute__((packed)) srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __attribute__((packed)) cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __attribute__((packed)) port_change;
+ } event;
+ u8 reserved3[3];
+ u8 owner;
+} __attribute__((packed));
+
+static void eq_set_ci(struct mlx4_eq *eq, int req_not)
+{
+ __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
+ req_not << 31),
+ eq->doorbell);
+ /* We still want ordering, just not swabbing, so add a barrier */
+ mb();
+}
+
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
+ return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
+}
+
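+/*
+ * An EQE is software-owned when its ownership bit matches the
+ * consumer's wrap parity: the HCA flips the bit it writes on each
+ * pass through the queue, and (cons_index & nent) flips each time
+ * the consumer wraps, so a mismatch means the entry is still owned
+ * by hardware.
+ */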
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
+{
+ struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
+ return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
+}
+
+static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+{
+ struct mlx4_eqe *eqe;
+ int cqn;
+ int eqes_found = 0;
+ int set_ci = 0;
+
+ while ((eqe = next_eqe_sw(eq))) {
+ /*
+ * Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
+ rmb();
+
+ switch (eqe->type) {
+ case MLX4_EVENT_TYPE_COMP:
+ cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
+ mlx4_cq_completion(dev, cqn);
+ break;
+
+ case MLX4_EVENT_TYPE_PATH_MIG:
+ case MLX4_EVENT_TYPE_COMM_EST:
+ case MLX4_EVENT_TYPE_SQ_DRAINED:
+ case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+ case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+ mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
+ eqe->type);
+ break;
+
+ case MLX4_EVENT_TYPE_SRQ_LIMIT:
+ case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
+ mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
+ eqe->type);
+ break;
+
+ case MLX4_EVENT_TYPE_CMD:
+ mlx4_cmd_event(dev,
+ be16_to_cpu(eqe->event.cmd.token),
+ eqe->event.cmd.status,
+ be64_to_cpu(eqe->event.cmd.out_param));
+ break;
+
+ case MLX4_EVENT_TYPE_PORT_CHANGE:
+ mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
+ be32_to_cpu(eqe->event.port_change.port) >> 28);
+ break;
+
+ case MLX4_EVENT_TYPE_CQ_ERROR:
+ mlx4_warn(dev, "CQ %s on CQN %06x\n",
+ eqe->event.cq_err.syndrome == 1 ?
+ "overrun" : "access violation",
+ be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
+ mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+ eqe->type);
+ break;
+
+ case MLX4_EVENT_TYPE_EQ_OVERFLOW:
+ mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
+ break;
+
+ case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
+ case MLX4_EVENT_TYPE_ECC_DETECT:
+ default:
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
+ eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ ++eq->cons_index;
+ eqes_found = 1;
+ ++set_ci;
+
+ /*
+ * The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create our EQs with MLX4_NUM_SPARE_EQE extra
+ * entries, so we must update our consumer index at
+ * least that often.
+ */
+ if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
+ /*
+ * Updating the consumer index mid-loop is a rare
+ * case, not the fast path.
+ */
+ eq_set_ci(eq, 0);
+ set_ci = 0;
+ }
+ }
+
+ eq_set_ci(eq, 1);
+
+ return eqes_found;
+}
+
+static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
+{
+ struct mlx4_dev *dev = dev_ptr;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int work = 0;
+ int i;
+
+ writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
+
+ for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
+
+ return IRQ_RETVAL(work);
+}
+
+static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
+{
+ struct mlx4_eq *eq = eq_ptr;
+ struct mlx4_dev *dev = eq->dev;
+
+ mlx4_eq_int(dev, eq);
+
+ /* MSI-X vectors always belong to us */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
+{
+ mlx4_handle_catas_err(dev_ptr);
+
+ /* MSI-X vectors always belong to us */
+ return IRQ_HANDLED;
+}
+
+static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
+ int eq_num)
+{
+ return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+}
+
+static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int eq_num)
+{
+ return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int eq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static void __devinit __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev,
+ struct mlx4_eq *eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int index;
+
+ index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
+
+ if (!priv->eq_table.uar_map[index]) {
+ priv->eq_table.uar_map[index] =
+ ioremap(pci_resource_start(dev->pdev, 2) +
+ ((eq->eqn / 4) << PAGE_SHIFT),
+ PAGE_SIZE);
+ if (!priv->eq_table.uar_map[index]) {
+ mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
+ eq->eqn);
+ return NULL;
+ }
+ }
+
+ return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
+}
+
+static int __devinit mlx4_create_eq(struct mlx4_dev *dev, int nent,
+ u8 intr, struct mlx4_eq *eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_eq_context *eq_context;
+ int npages;
+ u64 *dma_list = NULL;
+ dma_addr_t t;
+ u64 mtt_addr;
+ int err = -ENOMEM;
+ int i;
+
+ eq->dev = dev;
+ eq->nent = roundup_pow_of_two(max(nent, 2));
+ npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
+
+ eq->page_list = kmalloc(npages * sizeof *eq->page_list,
+ GFP_KERNEL);
+ if (!eq->page_list)
+ goto err_out;
+
+ for (i = 0; i < npages; ++i)
+ eq->page_list[i].buf = NULL;
+
+ dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ if (!dma_list)
+ goto err_out_free;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ goto err_out_free;
+ eq_context = mailbox->buf;
+
+ for (i = 0; i < npages; ++i) {
+ eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
+ PAGE_SIZE, &t, GFP_KERNEL);
+ if (!eq->page_list[i].buf)
+ goto err_out_free_pages;
+
+ dma_list[i] = t;
+ eq->page_list[i].map = t;
+
+ memset(eq->page_list[i].buf, 0, PAGE_SIZE);
+ }
+
+ eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
+ if (eq->eqn == -1)
+ goto err_out_free_pages;
+
+ eq->doorbell = mlx4_get_eq_uar(dev, eq);
+ if (!eq->doorbell) {
+ err = -ENOMEM;
+ goto err_out_free_eq;
+ }
+
+ err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
+ if (err)
+ goto err_out_free_eq;
+
+ err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
+ if (err)
+ goto err_out_free_mtt;
+
+ memset(eq_context, 0, sizeof *eq_context);
+ eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
+ MLX4_EQ_STATE_ARMED);
+ eq_context->log_eq_size = ilog2(eq->nent);
+ eq_context->intr = intr;
+ eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
+
+ mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
+ eq_context->mtt_base_addr_h = mtt_addr >> 32;
+ eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+ err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
+ if (err) {
+ mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
+ goto err_out_free_mtt;
+ }
+
+ kfree(dma_list);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ eq->cons_index = 0;
+
+ return err;
+
+err_out_free_mtt:
+ mlx4_mtt_cleanup(dev, &eq->mtt);
+
+err_out_free_eq:
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+
+err_out_free_pages:
+ for (i = 0; i < npages; ++i)
+ if (eq->page_list[i].buf)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ eq->page_list[i].buf,
+ eq->page_list[i].map);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_out_free:
+ kfree(eq->page_list);
+ kfree(dma_list);
+
+err_out:
+ return err;
+}
+
+static void mlx4_free_eq(struct mlx4_dev *dev,
+ struct mlx4_eq *eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
+ int i;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return;
+
+ err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
+ if (err)
+ mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
+
+ if (0) {
+ mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
+ for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
+ if (i % 4 == 0)
+ printk("[%02x] ", i * 4);
+ printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
+ if ((i + 1) % 4 == 0)
+ printk("\n");
+ }
+ }
+
+ mlx4_mtt_cleanup(dev, &eq->mtt);
+ for (i = 0; i < npages; ++i)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ eq->page_list[i].buf,
+ eq->page_list[i].map);
+
+ kfree(eq->page_list);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+}
+
+static void mlx4_free_irqs(struct mlx4_dev *dev)
+{
+ struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
+ int i;
+
+ if (eq_table->have_irq)
+ free_irq(dev->pdev->irq, dev);
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
+ if (eq_table->eq[i].have_irq)
+ free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+}
+
+static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
+ priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
+ if (!priv->clr_base) {
+ mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ iounmap(priv->clr_base);
+}
+
+int __devinit mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int ret;
+
+ /*
+ * We assume that mapping one page is enough for the whole EQ
+ * context table. This is fine with all current HCAs, because
+ * we only use 32 EQs and each EQ uses 64 bytes of context
+ * memory, or 1 KB total.
+ */
+ priv->eq_table.icm_virt = icm_virt;
+ priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
+ if (!priv->eq_table.icm_page)
+ return -ENOMEM;
+ priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+ __free_page(priv->eq_table.icm_page);
+ return -ENOMEM;
+ }
+
+ ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
+ if (ret) {
+ pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->eq_table.icm_page);
+ }
+
+ return ret;
+}
+
+void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
+ pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->eq_table.icm_page);
+}
+
+int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+ int i;
+
+ err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
+ dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
+ priv->eq_table.uar_map[i] = NULL;
+
+ err = mlx4_map_clr_int(dev);
+ if (err)
+ goto err_out_free;
+
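+	/*
+	 * The interrupt clear register is two dwords wide; pick the
+	 * dword holding our INTA pin and the bit within it (the
+	 * register is big-endian, hence the swab32).
+	 */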
+ priv->eq_table.clr_mask =
+ swab32(1 << (priv->eq_table.inta_pin & 31));
+ priv->eq_table.clr_int = priv->clr_base +
+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
+
+ err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
+ &priv->eq_table.eq[MLX4_EQ_COMP]);
+ if (err)
+ goto err_out_unmap;
+
+ err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
+ &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+ if (err)
+ goto err_out_comp;
+
+ if (dev->flags & MLX4_FLAG_MSI_X) {
+ static const char *eq_name[] = {
+ [MLX4_EQ_COMP] = DRV_NAME " (comp)",
+ [MLX4_EQ_ASYNC] = DRV_NAME " (async)",
+ [MLX4_EQ_CATAS] = DRV_NAME " (catas)"
+ };
+
+ err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
+ &priv->eq_table.eq[MLX4_EQ_CATAS]);
+ if (err)
+ goto err_out_async;
+
+ for (i = 0; i < MLX4_EQ_CATAS; ++i) {
+ err = request_irq(priv->eq_table.eq[i].irq,
+ mlx4_msi_x_interrupt,
+ 0, eq_name[i], priv->eq_table.eq + i);
+ if (err)
+ goto err_out_catas;
+
+ priv->eq_table.eq[i].have_irq = 1;
+ }
+
+ err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
+ mlx4_catas_interrupt, 0,
+ eq_name[MLX4_EQ_CATAS], dev);
+ if (err)
+ goto err_out_catas;
+
+ priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
+ } else {
+ err = request_irq(dev->pdev->irq, mlx4_interrupt,
+				  IRQF_SHARED, DRV_NAME, dev);
+ if (err)
+ goto err_out_async;
+
+ priv->eq_table.have_irq = 1;
+ }
+
+ err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+ if (err)
+ mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
+
+ for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ eq_set_ci(&priv->eq_table.eq[i], 1);
+
+ if (dev->flags & MLX4_FLAG_MSI_X) {
+ err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
+ priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
+ if (err)
+ mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
+ priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
+ }
+
+ return 0;
+
+err_out_catas:
+ mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
+
+err_out_async:
+ mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+
+err_out_comp:
+ mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);
+
+err_out_unmap:
+ mlx4_unmap_clr_int(dev);
+ mlx4_free_irqs(dev);
+
+err_out_free:
+ mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+ return err;
+}
+
+void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
+ priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
+
+ mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+
+ mlx4_free_irqs(dev);
+
+ for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ mlx4_free_eq(dev, &priv->eq_table.eq[i]);
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
+
+ mlx4_unmap_clr_int(dev);
+
+ for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
+ if (priv->eq_table.uar_map[i])
+ iounmap(priv->eq_table.uar_map[i]);
+
+ mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+}
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
new file mode 100644
index 00000000000..c4271731366
--- /dev/null
+++ b/drivers/net/mlx4/fw.c
@@ -0,0 +1,775 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx4/cmd.h>
+
+#include "fw.h"
+#include "icm.h"
+
+extern void __buggy_use_of_MLX4_GET(void);
+extern void __buggy_use_of_MLX4_PUT(void);
+
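+/*
+ * Command mailboxes are big-endian.  These macros dispatch on
+ * sizeof(dest)/sizeof(source) at compile time to pick the matching
+ * byte-swapping accessor; any other size leaves a reference to an
+ * undefined function, so misuse fails at link time.
+ */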
+#define MLX4_GET(dest, source, offset) \
+ do { \
+ void *__p = (char *) (source) + (offset); \
+ switch (sizeof (dest)) { \
+ case 1: (dest) = *(u8 *) __p; break; \
+ case 2: (dest) = be16_to_cpup(__p); break; \
+ case 4: (dest) = be32_to_cpup(__p); break; \
+ case 8: (dest) = be64_to_cpup(__p); break; \
+ default: __buggy_use_of_MLX4_GET(); \
+ } \
+ } while (0)
+
+#define MLX4_PUT(dest, source, offset) \
+ do { \
+ void *__d = ((char *) (dest) + (offset)); \
+ switch (sizeof(source)) { \
+ case 1: *(u8 *) __d = (source); break; \
+ case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
+ case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
+ case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
+ default: __buggy_use_of_MLX4_PUT(); \
+ } \
+ } while (0)
+
+static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
+{
+ static const char *fname[] = {
+ [ 0] = "RC transport",
+ [ 1] = "UC transport",
+ [ 2] = "UD transport",
+ [ 3] = "SRC transport",
+ [ 4] = "reliable multicast",
+ [ 5] = "FCoIB support",
+ [ 6] = "SRQ support",
+ [ 7] = "IPoIB checksum offload",
+ [ 8] = "P_Key violation counter",
+ [ 9] = "Q_Key violation counter",
+ [10] = "VMM",
+ [16] = "MW support",
+ [17] = "APM support",
+ [18] = "Atomic ops support",
+ [19] = "Raw multicast support",
+ [20] = "Address vector port checking support",
+ [21] = "UD multicast support",
+ [24] = "Demand paging support",
+ [25] = "Router support"
+ };
+ int i;
+
+ mlx4_dbg(dev, "DEV_CAP flags:\n");
+ for (i = 0; i < 32; ++i)
+ if (fname[i] && (flags & (1 << i)))
+ mlx4_dbg(dev, " %s\n", fname[i]);
+}
+
+int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 field;
+ u16 size;
+ u16 stat_rate;
+ int err;
+
+#define QUERY_DEV_CAP_OUT_SIZE 0x100
+#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
+#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
+#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
+#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
+#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
+#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
+#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
+#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
+#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
+#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
+#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
+#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
+#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
+#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
+#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
+#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
+#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
+#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
+#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
+#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
+#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
+#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
+#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
+#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
+#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
+#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
+#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
+#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
+#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
+#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
+#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
+#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
+#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
+#define QUERY_DEV_CAP_BF_OFFSET 0x4c
+#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
+#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
+#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
+#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
+#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
+#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
+#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
+#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
+#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
+#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
+#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
+#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
+#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
+#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
+#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
+#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
+#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
+#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
+#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
+#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
+#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
+#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
+#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97
+#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
+#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+ MLX4_CMD_TIME_CLASS_A);
+
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
+ dev_cap->reserved_qps = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
+ dev_cap->max_qps = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
+ dev_cap->reserved_srqs = 1 << (field >> 4);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
+ dev_cap->max_srqs = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
+ dev_cap->max_cq_sz = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
+ dev_cap->reserved_cqs = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
+ dev_cap->max_cqs = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
+ dev_cap->max_mpts = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
+ dev_cap->reserved_eqs = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
+ dev_cap->max_eqs = 1 << (field & 0x7);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
+ dev_cap->reserved_mtts = 1 << (field >> 4);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
+ dev_cap->max_mrw_sz = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
+ dev_cap->reserved_mrws = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
+ dev_cap->max_mtt_seg = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
+ dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
+ dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
+ dev_cap->max_rdma_global = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
+ dev_cap->local_ca_ack_delay = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
+ dev_cap->max_mtu = field >> 4;
+ dev_cap->max_port_width = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
+ dev_cap->max_vl = field >> 4;
+ dev_cap->num_ports = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
+ dev_cap->max_gids = 1 << (field & 0xf);
+ MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
+ dev_cap->stat_rate_support = stat_rate;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
+ dev_cap->max_pkeys = 1 << (field & 0xf);
+ MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
+ dev_cap->reserved_uars = field >> 4;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
+ dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
+ dev_cap->min_page_sz = 1 << field;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
+ if (field & 0x80) {
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
+ dev_cap->bf_reg_size = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
+ dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
+ mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
+ dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
+ } else {
+ dev_cap->bf_reg_size = 0;
+ mlx4_dbg(dev, "BlueFlame not available\n");
+ }
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
+ dev_cap->max_sq_sg = field;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
+ dev_cap->max_sq_desc_sz = size;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
+ dev_cap->max_qp_per_mcg = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
+ dev_cap->reserved_mgms = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
+ dev_cap->max_mcgs = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
+ dev_cap->reserved_pds = field >> 4;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
+ dev_cap->max_pds = 1 << (field & 0x3f);
+
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
+ dev_cap->rdmarc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
+ dev_cap->qpc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
+ dev_cap->aux_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
+ dev_cap->altc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
+ dev_cap->eqc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
+ dev_cap->cqc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
+ dev_cap->srq_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
+ dev_cap->cmpt_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
+ dev_cap->mtt_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
+ dev_cap->dmpt_entry_sz = size;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
+ dev_cap->max_srq_sz = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
+ dev_cap->max_qp_sz = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
+ dev_cap->resize_srq = field & 1;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
+ dev_cap->max_rq_sg = field;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
+ dev_cap->max_rq_desc_sz = size;
+
+ MLX4_GET(dev_cap->bmme_flags, outbox,
+ QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ MLX4_GET(dev_cap->reserved_lkey, outbox,
+ QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+ MLX4_GET(dev_cap->max_icm_sz, outbox,
+ QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
+
+ if (dev_cap->bmme_flags & 1)
+ mlx4_dbg(dev, "Base MM extensions: yes "
+ "(flags %d, rsvd L_Key %08x)\n",
+ dev_cap->bmme_flags, dev_cap->reserved_lkey);
+ else
+ mlx4_dbg(dev, "Base MM extensions: no\n");
+
+ /*
+ * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
+ * we can't use any EQs whose doorbell falls on that page,
+ * even if the EQ itself isn't reserved.
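+	 * For example, with two reserved UARs the doorbells of EQs 0-7
+	 * land on reserved pages, so at least 8 EQs are lost.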
+ */
+ dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
+ dev_cap->reserved_eqs);
+
+ mlx4_dbg(dev, "Max ICM size %lld MB\n",
+ (unsigned long long) dev_cap->max_icm_sz >> 20);
+ mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
+ dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
+ mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+ dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
+ mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
+ dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
+ mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
+ dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
+ mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
+ dev_cap->reserved_mrws, dev_cap->reserved_mtts);
+ mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
+ dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
+ mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
+		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
+ mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+ dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
+ mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
+ dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu,
+ dev_cap->max_port_width);
+ mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
+ dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
+ mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
+ dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+
+ dump_dev_cap_flags(dev, dev_cap->flags);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_icm_iter iter;
+ __be64 *pages;
+ int lg;
+ int nent = 0;
+ int i;
+ int err = 0;
+	int ts = 0, tc = 0;	/* running totals: size in KB and chunk count, for the log below */
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+ pages = mailbox->buf;
+
+ for (mlx4_icm_first(icm, &iter);
+ !mlx4_icm_last(&iter);
+ mlx4_icm_next(&iter)) {
+ /*
+ * We have to pass pages that are aligned to their
+ * size, so find the least significant 1 in the
+ * address or size and use that as our log2 size.
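+	 * For example, an 8 KB area at bus address 0x13000 is only
+	 * 4 KB aligned, so lg becomes 12 and it is passed as two
+	 * 4 KB pages.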
+ */
+ lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
+ if (lg < MLX4_ICM_PAGE_SHIFT) {
+ mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
+ MLX4_ICM_PAGE_SIZE,
+ (unsigned long long) mlx4_icm_addr(&iter),
+ mlx4_icm_size(&iter));
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
+ if (virt != -1) {
+ pages[nent * 2] = cpu_to_be64(virt);
+ virt += 1 << lg;
+ }
+
+ pages[nent * 2 + 1] =
+ cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
+ (lg - MLX4_ICM_PAGE_SHIFT));
+ ts += 1 << (lg - 10);
+ ++tc;
+
+ if (++nent == MLX4_MAILBOX_SIZE / 16) {
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+ MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+ nent = 0;
+ }
+ }
+ }
+
+ if (nent)
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+
+ switch (op) {
+ case MLX4_CMD_MAP_FA:
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+ break;
+ case MLX4_CMD_MAP_ICM_AUX:
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+ break;
+ case MLX4_CMD_MAP_ICM:
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
+ tc, ts, (unsigned long long) virt - (ts << 10));
+ break;
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
+{
+ return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
+}
+
+int mlx4_UNMAP_FA(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_RUN_FW(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_QUERY_FW(struct mlx4_dev *dev)
+{
+ struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ int err = 0;
+ u64 fw_ver;
+ u8 lg;
+
+#define QUERY_FW_OUT_SIZE 0x100
+#define QUERY_FW_VER_OFFSET 0x00
+#define QUERY_FW_MAX_CMD_OFFSET 0x0f
+#define QUERY_FW_ERR_START_OFFSET 0x30
+#define QUERY_FW_ERR_SIZE_OFFSET 0x38
+#define QUERY_FW_ERR_BAR_OFFSET 0x3c
+
+#define QUERY_FW_SIZE_OFFSET 0x00
+#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
+#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+ MLX4_CMD_TIME_CLASS_A);
+ if (err)
+ goto out;
+
+ MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
+ /*
+	 * The FW sub-minor version is in more significant bits than the
+	 * minor version, so swap them here.
+ */
+ dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
+ ((fw_ver & 0xffff0000ull) >> 16) |
+ ((fw_ver & 0x0000ffffull) << 16);
+
+ MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
+ cmd->max_cmds = 1 << lg;
+
+ mlx4_dbg(dev, "FW version %d.%d.%03d, max commands %d\n",
+ (int) (dev->caps.fw_ver >> 32),
+ (int) (dev->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->caps.fw_ver & 0xffff,
+ cmd->max_cmds);
+
+ MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
+ MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
+ MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
+ fw->catas_bar = (fw->catas_bar >> 6) * 2;
+
+ mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
+ (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
+
+ MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
+ MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
+ MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
+ fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
+
+ mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
+
+ /*
+ * Round up number of system pages needed in case
+ * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
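+	 * For example, with 64 KB system pages, 5 ICM pages align up
+	 * to 16 and shift down to a single system page.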
+ */
+ fw->fw_pages =
+ ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
+ (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
+
+ mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
+ (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+static void get_board_id(void *vsd, char *board_id)
+{
+ int i;
+
+#define VSD_OFFSET_SIG1 0x00
+#define VSD_OFFSET_SIG2 0xde
+#define VSD_OFFSET_MLX_BOARD_ID 0xd0
+#define VSD_OFFSET_TS_BOARD_ID 0x20
+
+#define VSD_SIGNATURE_TOPSPIN 0x5ad
+
+ memset(board_id, 0, MLX4_BOARD_ID_LEN);
+
+ if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
+ be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
+ strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
+ } else {
+ /*
+ * The board ID is a string but the firmware byte
+ * swaps each 4-byte word before passing it back to
+ * us. Therefore we need to swab it before printing.
+ */
+ for (i = 0; i < 4; ++i)
+ ((u32 *) board_id)[i] =
+ swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+ }
+}
+
+int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ int err;
+
+#define QUERY_ADAPTER_OUT_SIZE 0x100
+#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00
+#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
+#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
+#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
+#define QUERY_ADAPTER_VSD_OFFSET 0x20
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
+ MLX4_CMD_TIME_CLASS_A);
+ if (err)
+ goto out;
+
+ MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
+ MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
+ MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
+ MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
+
+ get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
+ adapter->board_id);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *inbox;
+ int err;
+
+#define INIT_HCA_IN_SIZE 0x200
+#define INIT_HCA_VERSION_OFFSET 0x000
+#define INIT_HCA_VERSION 2
+#define INIT_HCA_FLAGS_OFFSET 0x014
+#define INIT_HCA_QPC_OFFSET 0x020
+#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
+#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
+#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
+#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
+#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
+#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
+#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
+#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
+#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
+#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
+#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
+#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
+#define INIT_HCA_MCAST_OFFSET 0x0c0
+#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
+#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
+#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define INIT_HCA_TPT_OFFSET 0x0f0
+#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
+#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
+#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
+#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
+#define INIT_HCA_UAR_OFFSET 0x120
+#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
+#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ memset(inbox, 0, INIT_HCA_IN_SIZE);
+
+ *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
+
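+	/* Flags bit 1 advertises the host byte order to the HCA. */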
+#if defined(__LITTLE_ENDIAN)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
+#elif defined(__BIG_ENDIAN)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
+#else
+#error Host endianness not defined
+#endif
+ /* Check port for UD address vector: */
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
+ MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
+ MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
+ MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
+
+ /* multicast attributes */
+
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+ /* TPT attributes */
+
+ MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 1000);
+
+ if (err)
+ mlx4_err(dev, "INIT_HCA returns %d\n", err);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *inbox;
+ int err;
+ u32 flags;
+
+#define INIT_PORT_IN_SIZE 256
+#define INIT_PORT_FLAGS_OFFSET 0x00
+#define INIT_PORT_FLAG_SIG (1 << 18)
+#define INIT_PORT_FLAG_NG (1 << 17)
+#define INIT_PORT_FLAG_G0 (1 << 16)
+#define INIT_PORT_VL_SHIFT 4
+#define INIT_PORT_PORT_WIDTH_SHIFT 8
+#define INIT_PORT_MTU_OFFSET 0x04
+#define INIT_PORT_MAX_GID_OFFSET 0x06
+#define INIT_PORT_MAX_PKEY_OFFSET 0x0a
+#define INIT_PORT_GUID0_OFFSET 0x10
+#define INIT_PORT_NODE_GUID_OFFSET 0x18
+#define INIT_PORT_SI_GUID_OFFSET 0x20
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ memset(inbox, 0, INIT_PORT_IN_SIZE);
+
+ flags = 0;
+ flags |= param->set_guid0 ? INIT_PORT_FLAG_G0 : 0;
+ flags |= param->set_node_guid ? INIT_PORT_FLAG_NG : 0;
+ flags |= param->set_si_guid ? INIT_PORT_FLAG_SIG : 0;
+ flags |= (param->vl_cap & 0xf) << INIT_PORT_VL_SHIFT;
+ flags |= (param->port_width_cap & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
+ MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
+
+ MLX4_PUT(inbox, param->mtu, INIT_PORT_MTU_OFFSET);
+ MLX4_PUT(inbox, param->max_gid, INIT_PORT_MAX_GID_OFFSET);
+ MLX4_PUT(inbox, param->max_pkey, INIT_PORT_MAX_PKEY_OFFSET);
+ MLX4_PUT(inbox, param->guid0, INIT_PORT_GUID0_OFFSET);
+ MLX4_PUT(inbox, param->node_guid, INIT_PORT_NODE_GUID_OFFSET);
+ MLX4_PUT(inbox, param->si_guid, INIT_PORT_SI_GUID_OFFSET);
+
+ err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
+
+int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
+{
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+}
+EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
+
+int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
+{
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+}
+
+int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
+{
+ int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
+ MLX4_CMD_SET_ICM_SIZE,
+ MLX4_CMD_TIME_CLASS_A);
+ if (ret)
+ return ret;
+
+ /*
+ * Round up number of system pages needed in case
+ * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
+ */
+ *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
+ (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
+
+ return 0;
+}
+
+int mlx4_NOP(struct mlx4_dev *dev)
+{
+ /* Input modifier of 0x1f means "finish as soon as possible." */
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+}
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
new file mode 100644
index 00000000000..2616fa53d4d
--- /dev/null
+++ b/drivers/net/mlx4/fw.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_FW_H
+#define MLX4_FW_H
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_dev_cap {
+ int max_srq_sz;
+ int max_qp_sz;
+ int reserved_qps;
+ int max_qps;
+ int reserved_srqs;
+ int max_srqs;
+ int max_cq_sz;
+ int reserved_cqs;
+ int max_cqs;
+ int max_mpts;
+ int reserved_eqs;
+ int max_eqs;
+ int reserved_mtts;
+ int max_mrw_sz;
+ int reserved_mrws;
+ int max_mtt_seg;
+ int max_requester_per_qp;
+ int max_responder_per_qp;
+ int max_rdma_global;
+ int local_ca_ack_delay;
+ int max_mtu;
+ int max_port_width;
+ int max_vl;
+ int num_ports;
+ int max_gids;
+ u16 stat_rate_support;
+ int max_pkeys;
+ u32 flags;
+ int reserved_uars;
+ int uar_size;
+ int min_page_sz;
+ int bf_reg_size;
+ int bf_regs_per_page;
+ int max_sq_sg;
+ int max_sq_desc_sz;
+ int max_rq_sg;
+ int max_rq_desc_sz;
+ int max_qp_per_mcg;
+ int reserved_mgms;
+ int max_mcgs;
+ int reserved_pds;
+ int max_pds;
+ int qpc_entry_sz;
+ int rdmarc_entry_sz;
+ int altc_entry_sz;
+ int aux_entry_sz;
+ int srq_entry_sz;
+ int cqc_entry_sz;
+ int eqc_entry_sz;
+ int dmpt_entry_sz;
+ int cmpt_entry_sz;
+ int mtt_entry_sz;
+ int resize_srq;
+ u8 bmme_flags;
+ u32 reserved_lkey;
+ u64 max_icm_sz;
+};
+
+struct mlx4_adapter {
+ u32 vendor_id;
+ u32 device_id;
+ u32 revision_id;
+ char board_id[MLX4_BOARD_ID_LEN];
+ u8 inta_pin;
+};
+
+struct mlx4_init_hca_param {
+ u64 qpc_base;
+ u64 rdmarc_base;
+ u64 auxc_base;
+ u64 altc_base;
+ u64 srqc_base;
+ u64 cqc_base;
+ u64 eqc_base;
+ u64 mc_base;
+ u64 dmpt_base;
+ u64 cmpt_base;
+ u64 mtt_base;
+ u16 log_mc_entry_sz;
+ u16 log_mc_hash_sz;
+ u8 log_num_qps;
+ u8 log_num_srqs;
+ u8 log_num_cqs;
+ u8 log_num_eqs;
+ u8 log_rd_per_qp;
+ u8 log_mc_table_sz;
+ u8 log_mpt_sz;
+ u8 log_uar_sz;
+};
+
+struct mlx4_init_ib_param {
+ int port_width;
+ int vl_cap;
+ int mtu_cap;
+ u16 gid_cap;
+ u16 pkey_cap;
+ int set_guid0;
+ u64 guid0;
+ int set_node_guid;
+ u64 node_guid;
+ int set_si_guid;
+ u64 si_guid;
+};
+
+struct mlx4_set_ib_param {
+ int set_si_guid;
+ int reset_qkey_viol;
+ u64 si_guid;
+ u32 cap_mask;
+};
+
+int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_FA(struct mlx4_dev *dev);
+int mlx4_RUN_FW(struct mlx4_dev *dev);
+int mlx4_QUERY_FW(struct mlx4_dev *dev);
+int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
+int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
+int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
+int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
+int mlx4_NOP(struct mlx4_dev *dev);
+
+#endif /* MLX4_FW_H */
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
new file mode 100644
index 00000000000..e96feaed6ed
--- /dev/null
+++ b/drivers/net/mlx4/icm.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "icm.h"
+#include "fw.h"
+
+/*
+ * We allocate in as big chunks as we can, up to a maximum of 256 KB
+ * per chunk.
+ */
+enum {
+ MLX4_ICM_ALLOC_SIZE = 1 << 18,
+ MLX4_TABLE_CHUNK_SIZE = 1 << 18
+};
+
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
+{
+ struct mlx4_icm_chunk *chunk, *tmp;
+ int i;
+
+ list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
+ if (chunk->nsg > 0)
+ pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
+
+ for (i = 0; i < chunk->npages; ++i)
+ __free_pages(chunk->mem[i].page,
+ get_order(chunk->mem[i].length));
+
+ kfree(chunk);
+ }
+
+ kfree(icm);
+}
+
+struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
+ gfp_t gfp_mask)
+{
+ struct mlx4_icm *icm;
+ struct mlx4_icm_chunk *chunk = NULL;
+ int cur_order;
+
+ icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!icm)
+ return icm;
+
+ icm->refcount = 0;
+ INIT_LIST_HEAD(&icm->chunk_list);
+
+ cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
+
+ while (npages > 0) {
+ if (!chunk) {
+ chunk = kmalloc(sizeof *chunk,
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!chunk)
+ goto fail;
+
+ chunk->npages = 0;
+ chunk->nsg = 0;
+ list_add_tail(&chunk->list, &icm->chunk_list);
+ }
+
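+		/* Never allocate a bigger chunk than we still need. */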
+ while (1 << cur_order > npages)
+ --cur_order;
+
+ chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
+ if (chunk->mem[chunk->npages].page) {
+ chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
+ chunk->mem[chunk->npages].offset = 0;
+
+ if (++chunk->npages == MLX4_ICM_CHUNK_LEN) {
+ chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+ chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (chunk->nsg <= 0)
+ goto fail;
+
+ chunk = NULL;
+ }
+
+ npages -= 1 << cur_order;
+ } else {
+ --cur_order;
+ if (cur_order < 0)
+ goto fail;
+ }
+ }
+
+ if (chunk) {
+ chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+ chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (chunk->nsg <= 0)
+ goto fail;
+ }
+
+ return icm;
+
+fail:
+ mlx4_free_icm(dev, icm);
+ return NULL;
+}
+
+static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
+{
+ return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
+}
+
+int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+{
+ return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be64 *inbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ inbox[0] = cpu_to_be64(virt);
+ inbox[1] = cpu_to_be64(dma_addr);
+
+ err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ if (!err)
+ mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
+ (unsigned long long) dma_addr, (unsigned long long) virt);
+
+ return err;
+}
+
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
+{
+ return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
+}
+
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+{
+ int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+ int ret = 0;
+
+ mutex_lock(&table->mutex);
+
+ if (table->icm[i]) {
+ ++table->icm[i]->refcount;
+ goto out;
+ }
+
+ table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+ (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+ __GFP_NOWARN);
+ if (!table->icm[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
+ (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
+ mlx4_free_icm(dev, table->icm[i]);
+ table->icm[i] = NULL;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ++table->icm[i]->refcount;
+
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
+
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+{
+ int i;
+
+ i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+
+ mutex_lock(&table->mutex);
+
+ if (--table->icm[i]->refcount == 0) {
+ mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+ MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+ mlx4_free_icm(dev, table->icm[i]);
+ table->icm[i] = NULL;
+ }
+
+ mutex_unlock(&table->mutex);
+}
+
+void *mlx4_table_find(struct mlx4_icm_table *table, int obj)
+{
+ int idx, offset, i;
+ struct mlx4_icm_chunk *chunk;
+ struct mlx4_icm *icm;
+ struct page *page = NULL;
+
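+	/* Direct pointer access only works for tables kept in lowmem. */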
+ if (!table->lowmem)
+ return NULL;
+
+ mutex_lock(&table->mutex);
+
+ idx = obj & (table->num_obj - 1);
+ icm = table->icm[idx / (MLX4_TABLE_CHUNK_SIZE / table->obj_size)];
+ offset = idx % (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+
+ if (!icm)
+ goto out;
+
+ list_for_each_entry(chunk, &icm->chunk_list, list) {
+ for (i = 0; i < chunk->npages; ++i) {
+ if (chunk->mem[i].length > offset) {
+ page = chunk->mem[i].page;
+ goto out;
+ }
+ offset -= chunk->mem[i].length;
+ }
+ }
+
+out:
+ mutex_unlock(&table->mutex);
+ return page ? lowmem_page_address(page) + offset : NULL;
+}
+
+int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ int start, int end)
+{
+ int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
+ int i, err;
+
+ for (i = start; i <= end; i += inc) {
+ err = mlx4_table_get(dev, table, i);
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i > start) {
+ i -= inc;
+ mlx4_table_put(dev, table, i);
+ }
+
+ return err;
+}
+
+void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ int start, int end)
+{
+ int i;
+
+ for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
+ mlx4_table_put(dev, table, i);
+}
+
+int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ u64 virt, int obj_size, int nobj, int reserved,
+ int use_lowmem)
+{
+ int obj_per_chunk;
+ int num_icm;
+ unsigned chunk_size;
+ int i;
+
+ obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
+ num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+
+ table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
+ if (!table->icm)
+ return -ENOMEM;
+ table->virt = virt;
+ table->num_icm = num_icm;
+ table->num_obj = nobj;
+ table->obj_size = obj_size;
+ table->lowmem = use_lowmem;
+ mutex_init(&table->mutex);
+
+ for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
+ chunk_size = MLX4_TABLE_CHUNK_SIZE;
+ if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
+ chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
+
+ table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
+ (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+ __GFP_NOWARN);
+ if (!table->icm[i])
+ goto err;
+ if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
+ mlx4_free_icm(dev, table->icm[i]);
+ table->icm[i] = NULL;
+ goto err;
+ }
+
+ /*
+ * Add a reference to this ICM chunk so that it never
+ * gets freed (since it contains reserved firmware objects).
+ */
+ ++table->icm[i]->refcount;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < num_icm; ++i)
+ if (table->icm[i]) {
+ mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
+ MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+ mlx4_free_icm(dev, table->icm[i]);
+ }
+
+ return -ENOMEM;
+}
+
+void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
+{
+ int i;
+
+ for (i = 0; i < table->num_icm; ++i)
+ if (table->icm[i]) {
+ mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+ MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+ mlx4_free_icm(dev, table->icm[i]);
+ }
+
+ kfree(table->icm);
+}
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
new file mode 100644
index 00000000000..bea223d879a
--- /dev/null
+++ b/drivers/net/mlx4/icm.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_ICM_H
+#define MLX4_ICM_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+
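+/*
+ * Chosen so that a struct mlx4_icm_chunk (list head, two ints and
+ * the scatterlist array) fits in about 256 bytes.
+ */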
+#define MLX4_ICM_CHUNK_LEN \
+ ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
+ (sizeof (struct scatterlist)))
+
+enum {
+ MLX4_ICM_PAGE_SHIFT = 12,
+ MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
+};
+
+struct mlx4_icm_chunk {
+ struct list_head list;
+ int npages;
+ int nsg;
+ struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
+};
+
+struct mlx4_icm {
+ struct list_head chunk_list;
+ int refcount;
+};
+
+struct mlx4_icm_iter {
+ struct mlx4_icm *icm;
+ struct mlx4_icm_chunk *chunk;
+ int page_idx;
+};
+
+struct mlx4_dev;
+
+struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask);
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm);
+
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ int start, int end);
+void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ int start, int end);
+int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ u64 virt, int obj_size, int nobj, int reserved,
+ int use_lowmem);
+void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
+void *mlx4_table_find(struct mlx4_icm_table *table, int obj);
+
+static inline void mlx4_icm_first(struct mlx4_icm *icm,
+ struct mlx4_icm_iter *iter)
+{
+ iter->icm = icm;
+ iter->chunk = list_empty(&icm->chunk_list) ?
+ NULL : list_entry(icm->chunk_list.next,
+ struct mlx4_icm_chunk, list);
+ iter->page_idx = 0;
+}
+
+static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
+{
+ return !iter->chunk;
+}
+
+static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
+{
+ if (++iter->page_idx >= iter->chunk->nsg) {
+ if (iter->chunk->list.next == &iter->icm->chunk_list) {
+ iter->chunk = NULL;
+ return;
+ }
+
+ iter->chunk = list_entry(iter->chunk->list.next,
+ struct mlx4_icm_chunk, list);
+ iter->page_idx = 0;
+ }
+}
+
+static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
+{
+ return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+}
+
+static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
+{
+ return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+}
+
+int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
+int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
+
+#endif /* MLX4_ICM_H */
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
new file mode 100644
index 00000000000..65854f9e9c7
--- /dev/null
+++ b/drivers/net/mlx4/intf.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx4/driver.h>
+
+#include "mlx4.h"
+
+struct mlx4_device_context {
+ struct list_head list;
+ struct mlx4_interface *intf;
+ void *context;
+};
+
+static LIST_HEAD(intf_list);
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(intf_mutex);
+
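+/*
+ * Each (interface, device) pair gets its own mlx4_device_context.
+ * mlx4_register_interface() and mlx4_register_device() each walk the
+ * other side's list, so module load order doesn't matter.
+ */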
+static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+{
+ struct mlx4_device_context *dev_ctx;
+
+ dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
+ if (!dev_ctx)
+ return;
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(&priv->dev);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else
+ kfree(dev_ctx);
+}
+
+static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+{
+ struct mlx4_device_context *dev_ctx;
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ intf->remove(&priv->dev, dev_ctx->context);
+ kfree(dev_ctx);
+ return;
+ }
+}
+
+int mlx4_register_interface(struct mlx4_interface *intf)
+{
+ struct mlx4_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&intf_mutex);
+
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx4_add_device(intf, priv);
+
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_interface);
+
+void mlx4_unregister_interface(struct mlx4_interface *intf)
+{
+ struct mlx4_priv *priv;
+
+ mutex_lock(&intf_mutex);
+
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx4_remove_device(intf, priv);
+
+ list_del(&intf->list);
+
+ mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
+
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
+ int subtype, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_device_context *dev_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->event)
+ dev_ctx->intf->event(dev, dev_ctx->context, type,
+ subtype, port);
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+int mlx4_register_device(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_interface *intf;
+
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
+
+ mutex_lock(&intf_mutex);
+
+ list_add_tail(&priv->dev_list, &dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx4_add_device(intf, priv);
+
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+
+void mlx4_unregister_device(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_interface *intf;
+
+ mutex_lock(&intf_mutex);
+
+ list_for_each_entry(intf, &intf_list, list)
+ mlx4_remove_device(intf, priv);
+
+ list_del(&priv->dev_list);
+
+ mutex_unlock(&intf_mutex);
+}
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
new file mode 100644
index 00000000000..4debb024eaf
--- /dev/null
+++ b/drivers/net/mlx4/main.c
@@ -0,0 +1,936 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/doorbell.h>
+
+#include "mlx4.h"
+#include "fw.h"
+#include "icm.h"
+
+MODULE_AUTHOR("Roland Dreier");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#ifdef CONFIG_MLX4_DEBUG
+
+int mlx4_debug_level = 0;
+module_param_named(debug_level, mlx4_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
+
+#endif /* CONFIG_MLX4_DEBUG */
+
+#ifdef CONFIG_PCI_MSI
+
+static int msi_x;
+module_param(msi_x, int, 0444);
+MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
+
+#else /* CONFIG_PCI_MSI */
+
+#define msi_x (0)
+
+#endif /* CONFIG_PCI_MSI */
+
+static const char mlx4_version[] __devinitdata =
+ DRV_NAME ": Mellanox ConnectX core driver v"
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static struct mlx4_profile default_profile = {
+ .num_qp = 1 << 16,
+ .num_srq = 1 << 16,
+ .rdmarc_per_qp = 4,
+ .num_cq = 1 << 16,
+ .num_mcg = 1 << 13,
+ .num_mpt = 1 << 17,
+ .num_mtt = 1 << 20,
+};
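
These defaults feed mlx4_make_profile() (see profile.c later in this patch), which rounds each count up to a power of two and sizes the corresponding ICM table. As a rough worked example, assuming a hypothetical 256-byte QP context entry: 2^16 QPs cost 2^16 * 256 B = 16 MB of QPC alone, and the RDMARC table adds 2^16 * 4 more entries on top, so the default profile lays out tens of megabytes of (sparse, mapped-on-demand) ICM virtual space before a single QP is created.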
+
+static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+ int err;
+
+ err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ if (dev_cap->min_page_sz > PAGE_SIZE) {
+ mlx4_err(dev, "HCA minimum page size of %d bigger than "
+ "kernel PAGE_SIZE of %ld, aborting.\n",
+ dev_cap->min_page_sz, PAGE_SIZE);
+ return -ENODEV;
+ }
+ if (dev_cap->num_ports > MLX4_MAX_PORTS) {
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+ "aborting.\n",
+ dev_cap->num_ports, MLX4_MAX_PORTS);
+ return -ENODEV;
+ }
+
+ if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
+ mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
+ "PCI resource 2 size of 0x%llx, aborting.\n",
+ dev_cap->uar_size,
+ (unsigned long long) pci_resource_len(dev->pdev, 2));
+ return -ENODEV;
+ }
+
+ dev->caps.num_ports = dev_cap->num_ports;
+ dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
+ dev->caps.vl_cap = dev_cap->max_vl;
+ dev->caps.mtu_cap = dev_cap->max_mtu;
+ dev->caps.gid_table_len = dev_cap->max_gids;
+ dev->caps.pkey_table_len = dev_cap->max_pkeys;
+ dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
+ dev->caps.bf_reg_size = dev_cap->bf_reg_size;
+ dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
+ dev->caps.max_sq_sg = dev_cap->max_sq_sg;
+ dev->caps.max_rq_sg = dev_cap->max_rq_sg;
+ dev->caps.max_wqes = dev_cap->max_qp_sz;
+ dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
+ dev->caps.reserved_qps = dev_cap->reserved_qps;
+ dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
+ dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
+ dev->caps.reserved_srqs = dev_cap->reserved_srqs;
+ dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
+ dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
+ dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
+ /*
+ * Subtract 1 from the limit because we need to allocate a
+ * spare CQE so the HCA HW can tell the difference between an
+ * empty CQ and a full CQ.
+ */
+ dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
+ dev->caps.reserved_cqs = dev_cap->reserved_cqs;
+ dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+ dev->caps.reserved_mtts = dev_cap->reserved_mtts;
+ dev->caps.reserved_mrws = dev_cap->reserved_mrws;
+ dev->caps.reserved_uars = dev_cap->reserved_uars;
+ dev->caps.reserved_pds = dev_cap->reserved_pds;
+ dev->caps.port_width_cap = dev_cap->max_port_width;
+ dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+ dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
+ dev->caps.flags = dev_cap->flags;
+ dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+
+ return 0;
+}
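
One detail worth calling out: page_size_cap is built by masking off everything below the HCA's minimum page size. For example, with min_page_sz = 4096, ~(u32)(4096 - 1) = 0xfffff000, a mask in which every power-of-two page size from 4 KB upward has its bit set, so any supported page size ANDs with the mask to itself.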
+
+static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
+ GFP_HIGHUSER | __GFP_NOWARN);
+ if (!priv->fw.fw_icm) {
+ mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+ return -ENOMEM;
+ }
+
+ err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
+ if (err) {
+ mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+ goto err_free;
+ }
+
+ err = mlx4_RUN_FW(dev);
+ if (err) {
+ mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+ goto err_unmap_fa;
+ }
+
+ return 0;
+
+err_unmap_fa:
+ mlx4_UNMAP_FA(dev);
+
+err_free:
+ mlx4_free_icm(dev, priv->fw.fw_icm);
+ return err;
+}
+
+static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
+ int cmpt_entry_sz)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
+ cmpt_base +
+ ((u64) (MLX4_CMPT_TYPE_QP *
+ cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+ cmpt_entry_sz, dev->caps.num_qps,
+ dev->caps.reserved_qps, 0);
+ if (err)
+ goto err;
+
+ err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
+ cmpt_base +
+ ((u64) (MLX4_CMPT_TYPE_SRQ *
+ cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+ cmpt_entry_sz, dev->caps.num_srqs,
+ dev->caps.reserved_srqs, 0);
+ if (err)
+ goto err_qp;
+
+ err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
+ cmpt_base +
+ ((u64) (MLX4_CMPT_TYPE_CQ *
+ cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+ cmpt_entry_sz, dev->caps.num_cqs,
+ dev->caps.reserved_cqs, 0);
+ if (err)
+ goto err_srq;
+
+ err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
+ cmpt_base +
+ ((u64) (MLX4_CMPT_TYPE_EQ *
+ cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+ cmpt_entry_sz,
+ roundup_pow_of_two(MLX4_NUM_EQ +
+ dev->caps.reserved_eqs),
+ MLX4_NUM_EQ + dev->caps.reserved_eqs, 0);
+ if (err)
+ goto err_cq;
+
+ return 0;
+
+err_cq:
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+
+err_srq:
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+
+err_qp:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+
+err:
+ return err;
+}
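
The arithmetic above carves one cMPT slice per object type out of a single region: slice n starts at cmpt_base + ((u64)(n * cmpt_entry_sz) << MLX4_CMPT_SHIFT). With a hypothetical cmpt_entry_sz of 64 bytes, the SRQ slice (type 1) begins 64 << 24 = 1 GB into the region. These are ICM virtual addresses, and only pages backing live objects are actually mapped, so the huge per-type spacing costs nothing in host memory.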
+
+static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *init_hca,
+ u64 icm_size)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u64 aux_pages;
+ int err;
+
+ err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
+ if (err) {
+ mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+ return err;
+ }
+
+ mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+ (unsigned long long) icm_size >> 10,
+ (unsigned long long) aux_pages << 2);
+
+ priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
+ GFP_HIGHUSER | __GFP_NOWARN);
+ if (!priv->fw.aux_icm) {
+ mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+ return -ENOMEM;
+ }
+
+ err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
+ if (err) {
+ mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+ goto err_free_aux;
+ }
+
+ err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
+ if (err) {
+ mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+ goto err_unmap_aux;
+ }
+
+ err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+ if (err) {
+ mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+ goto err_unmap_cmpt;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
+ init_hca->mtt_base,
+ dev->caps.mtt_entry_sz,
+ dev->caps.num_mtt_segs,
+ dev->caps.reserved_mtts, 1);
+ if (err) {
+ mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+ goto err_unmap_eq;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
+ init_hca->dmpt_base,
+ dev_cap->dmpt_entry_sz,
+ dev->caps.num_mpts,
+ dev->caps.reserved_mrws, 1);
+ if (err) {
+ mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+ goto err_unmap_mtt;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
+ init_hca->qpc_base,
+ dev_cap->qpc_entry_sz,
+ dev->caps.num_qps,
+ dev->caps.reserved_qps, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+ goto err_unmap_dmpt;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
+ init_hca->auxc_base,
+ dev_cap->aux_entry_sz,
+ dev->caps.num_qps,
+ dev->caps.reserved_qps, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+ goto err_unmap_qp;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
+ init_hca->altc_base,
+ dev_cap->altc_entry_sz,
+ dev->caps.num_qps,
+ dev->caps.reserved_qps, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+ goto err_unmap_auxc;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
+ init_hca->rdmarc_base,
+ dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
+ dev->caps.num_qps,
+ dev->caps.reserved_qps, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
+ goto err_unmap_altc;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->cq_table.table,
+ init_hca->cqc_base,
+ dev_cap->cqc_entry_sz,
+ dev->caps.num_cqs,
+ dev->caps.reserved_cqs, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+ goto err_unmap_rdmarc;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->srq_table.table,
+ init_hca->srqc_base,
+ dev_cap->srq_entry_sz,
+ dev->caps.num_srqs,
+ dev->caps.reserved_srqs, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+ goto err_unmap_cq;
+ }
+
+ /*
+ * It's not strictly required, but for simplicity just map the
+ * whole multicast group table now. The table isn't very big
+ * and it's a lot easier than trying to track ref counts.
+ */
+ err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
+ init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+ dev->caps.num_mgms + dev->caps.num_amgms,
+ dev->caps.num_mgms + dev->caps.num_amgms,
+ 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+ goto err_unmap_srq;
+ }
+
+ return 0;
+
+err_unmap_srq:
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
+
+err_unmap_cq:
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
+
+err_unmap_rdmarc:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
+
+err_unmap_altc:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
+
+err_unmap_auxc:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
+
+err_unmap_qp:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
+
+err_unmap_dmpt:
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
+
+err_unmap_mtt:
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+
+err_unmap_eq:
+ mlx4_unmap_eq_icm(dev);
+
+err_unmap_cmpt:
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+
+err_unmap_aux:
+ mlx4_UNMAP_ICM_AUX(dev);
+
+err_free_aux:
+ mlx4_free_icm(dev, priv->fw.aux_icm);
+
+ return err;
+}
+
+static void mlx4_free_icms(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+ mlx4_unmap_eq_icm(dev);
+
+ mlx4_UNMAP_ICM_AUX(dev);
+ mlx4_free_icm(dev, priv->fw.aux_icm);
+}
+
+static void mlx4_close_hca(struct mlx4_dev *dev)
+{
+ mlx4_CLOSE_HCA(dev, 0);
+ mlx4_free_icms(dev);
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm);
+}
+
+static int __devinit mlx4_init_hca(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_adapter adapter;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_profile profile;
+ struct mlx4_init_hca_param init_hca;
+ u64 icm_size;
+ int err;
+
+ err = mlx4_QUERY_FW(dev);
+ if (err) {
+ mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ return err;
+ }
+
+ err = mlx4_load_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to start FW, aborting.\n");
+ return err;
+ }
+
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_stop_fw;
+ }
+
+ profile = default_profile;
+
+ icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
+ if ((long long) icm_size < 0) {
+ err = icm_size;
+ goto err_stop_fw;
+ }
+
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+
+ err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ if (err)
+ goto err_stop_fw;
+
+ err = mlx4_INIT_HCA(dev, &init_hca);
+ if (err) {
+ mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ goto err_free_icm;
+ }
+
+ err = mlx4_QUERY_ADAPTER(dev, &adapter);
+ if (err) {
+ mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+ goto err_close;
+ }
+
+ priv->eq_table.inta_pin = adapter.inta_pin;
+ priv->rev_id = adapter.revision_id;
+ memcpy(priv->board_id, adapter.board_id, sizeof priv->board_id);
+
+ return 0;
+
+err_close:
+ mlx4_CLOSE_HCA(dev, 0); /* fall through frees ICM and FW exactly once */
+
+err_free_icm:
+ mlx4_free_icms(dev);
+
+err_stop_fw:
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, priv->fw.fw_icm);
+
+ return err;
+}
+
+static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
+
+ err = mlx4_init_uar_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "user access region table, aborting.\n");
+ return err;
+ }
+
+ err = mlx4_uar_alloc(dev, &priv->driver_uar);
+ if (err) {
+ mlx4_err(dev, "Failed to allocate driver access region, "
+ "aborting.\n");
+ goto err_uar_table_free;
+ }
+
+ priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!priv->kar) {
+ mlx4_err(dev, "Couldn't map kernel access region, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_uar_free;
+ }
+
+ err = mlx4_init_pd_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "protection domain table, aborting.\n");
+ goto err_kar_unmap;
+ }
+
+ err = mlx4_init_mr_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "memory region table, aborting.\n");
+ goto err_pd_table_free;
+ }
+
+ mlx4_map_catas_buf(dev);
+
+ err = mlx4_init_eq_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "event queue table, aborting.\n");
+ goto err_catas_buf;
+ }
+
+ err = mlx4_cmd_use_events(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to switch to event-driven "
+ "firmware commands, aborting.\n");
+ goto err_eq_table_free;
+ }
+
+ err = mlx4_NOP(dev);
+ if (err) {
+ mlx4_err(dev, "NOP command failed to generate interrupt "
+ "(IRQ %d), aborting.\n",
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ mlx4_err(dev, "Try again with MSI-X disabled.\n");
+ else
+ mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
+
+ goto err_cmd_poll;
+ }
+
+ mlx4_dbg(dev, "NOP command IRQ test passed\n");
+
+ err = mlx4_init_cq_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "completion queue table, aborting.\n");
+ goto err_cmd_poll;
+ }
+
+ err = mlx4_init_srq_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "shared receive queue table, aborting.\n");
+ goto err_cq_table_free;
+ }
+
+ err = mlx4_init_qp_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "queue pair table, aborting.\n");
+ goto err_srq_table_free;
+ }
+
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "multicast group table, aborting.\n");
+ goto err_qp_table_free;
+ }
+
+ return 0;
+
+err_qp_table_free:
+ mlx4_cleanup_qp_table(dev);
+
+err_srq_table_free:
+ mlx4_cleanup_srq_table(dev);
+
+err_cq_table_free:
+ mlx4_cleanup_cq_table(dev);
+
+err_cmd_poll:
+ mlx4_cmd_use_polling(dev);
+
+err_eq_table_free:
+ mlx4_cleanup_eq_table(dev);
+
+err_catas_buf:
+ mlx4_unmap_catas_buf(dev);
+ mlx4_cleanup_mr_table(dev);
+
+err_pd_table_free:
+ mlx4_cleanup_pd_table(dev);
+
+err_kar_unmap:
+ iounmap(priv->kar);
+
+err_uar_free:
+ mlx4_uar_free(dev, &priv->driver_uar);
+
+err_uar_table_free:
+ mlx4_cleanup_uar_table(dev);
+ return err;
+}
+
+static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct msix_entry entries[MLX4_NUM_EQ];
+ int err;
+ int i;
+
+ if (msi_x) {
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
+ entries[i].entry = i;
+
+ err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
+ if (err) {
+ if (err > 0)
+ mlx4_info(dev, "Only %d MSI-X vectors available, "
+ "not using MSI-X\n", err);
+ goto no_msi;
+ }
+
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
+ priv->eq_table.eq[i].irq = entries[i].vector;
+
+ dev->flags |= MLX4_FLAG_MSI_X;
+ return;
+ }
+
+no_msi:
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
+ priv->eq_table.eq[i].irq = dev->pdev->irq;
+}
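
In kernels of this vintage, pci_enable_msix() returns 0 on success, a negative errno on hard failure, or a positive count of vectors that could have been allocated. This driver simply falls back to legacy INTx on any nonzero return. A retry loop, sketched below under the assumption that fewer vectors would still be useful (which is not the choice made here), would look like:

    int nvec = MLX4_NUM_EQ;

    while ((err = pci_enable_msix(dev->pdev, entries, nvec)) > 0)
        nvec = err;    /* retry with what the platform can give us */

Falling back instead keeps the EQ-to-vector mapping trivial: either all MLX4_NUM_EQ queues get their own vector, or they all share pdev->irq.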
+
+static int __devinit mlx4_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static int mlx4_version_printed;
+ struct mlx4_priv *priv;
+ struct mlx4_dev *dev;
+ int err;
+
+ if (!mlx4_version_printed) {
+ printk(KERN_INFO "%s", mlx4_version);
+ ++mlx4_version_printed;
+ }
+
+ printk(KERN_INFO PFX "Initializing %s\n",
+ pci_name(pdev));
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, "
+ "aborting.\n");
+ return err;
+ }
+
+ /*
+ * Check for BARs. We expect 0: 1MB, 2: 8MB, 4: DDR (may not
+ * be present)
+ */
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ pci_resource_len(pdev, 0) != 1 << 20) {
+ dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+
+ err = pci_request_region(pdev, 0, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
+ goto err_disable_pdev;
+ }
+
+ err = pci_request_region(pdev, 2, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
+ goto err_release_bar0;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ if (err) {
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ goto err_release_bar2;
+ }
+ }
+ err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (err) {
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
+ "consistent PCI DMA mask.\n");
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
+ "aborting.\n");
+ goto err_release_bar2;
+ }
+ }
+
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "Device struct alloc failed, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_release_bar2;
+ }
+
+ dev = &priv->dev;
+ dev->pdev = pdev;
+
+ /*
+ * Now reset the HCA before we touch the PCI capabilities or
+ * attempt a firmware command, since a boot ROM may have left
+ * the HCA in an undefined state.
+ */
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_free_dev;
+ }
+
+ mlx4_enable_msi_x(dev);
+
+ err = mlx4_cmd_init(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to init command interface, aborting.\n");
+ goto err_free_dev;
+ }
+
+ err = mlx4_init_hca(dev);
+ if (err)
+ goto err_cmd;
+
+ err = mlx4_setup_hca(dev);
+ if (err)
+ goto err_close;
+
+ err = mlx4_register_device(dev);
+ if (err)
+ goto err_cleanup;
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+err_cleanup:
+ mlx4_cleanup_mcg_table(dev);
+ mlx4_cleanup_qp_table(dev);
+ mlx4_cleanup_srq_table(dev);
+ mlx4_cleanup_cq_table(dev);
+ mlx4_cmd_use_polling(dev);
+ mlx4_cleanup_eq_table(dev);
+
+ mlx4_unmap_catas_buf(dev);
+
+ mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_pd_table(dev);
+ mlx4_cleanup_uar_table(dev);
+
+err_close:
+ mlx4_close_hca(dev);
+
+err_cmd:
+ mlx4_cmd_cleanup(dev);
+
+err_free_dev:
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ pci_disable_msix(pdev);
+
+ kfree(priv);
+
+err_release_bar2:
+ pci_release_region(pdev, 2);
+
+err_release_bar0:
+ pci_release_region(pdev, 0);
+
+err_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit mlx4_remove_one(struct pci_dev *pdev)
+{
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int p;
+
+ if (dev) {
+ mlx4_unregister_device(dev);
+
+ for (p = 1; p <= dev->caps.num_ports; ++p)
+ mlx4_CLOSE_PORT(dev, p);
+
+ mlx4_cleanup_mcg_table(dev);
+ mlx4_cleanup_qp_table(dev);
+ mlx4_cleanup_srq_table(dev);
+ mlx4_cleanup_cq_table(dev);
+ mlx4_cmd_use_polling(dev);
+ mlx4_cleanup_eq_table(dev);
+
+ mlx4_unmap_catas_buf(dev);
+
+ mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_pd_table(dev);
+
+ iounmap(priv->kar);
+ mlx4_uar_free(dev, &priv->driver_uar);
+ mlx4_cleanup_uar_table(dev);
+ mlx4_close_hca(dev);
+ mlx4_cmd_cleanup(dev);
+
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ pci_disable_msix(pdev);
+
+ kfree(priv);
+ pci_release_region(pdev, 2);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static struct pci_device_id mlx4_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
+ { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
+ { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
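
PCI_VDEVICE() is shorthand for the common case; PCI_VDEVICE(MELLANOX, 0x6340) expands (in pci.h of this era) to the positional initializers

    PCI_VENDOR_ID_MELLANOX, 0x6340, PCI_ANY_ID, PCI_ANY_ID, 0, 0

that is, vendor and device fixed, any subsystem IDs, no class matching.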
+
+static struct pci_driver mlx4_driver = {
+ .name = DRV_NAME,
+ .id_table = mlx4_pci_table,
+ .probe = mlx4_init_one,
+ .remove = __devexit_p(mlx4_remove_one)
+};
+
+static int __init mlx4_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mlx4_driver);
+ return ret < 0 ? ret : 0;
+}
+
+static void __exit mlx4_cleanup(void)
+{
+ pci_unregister_driver(&mlx4_driver);
+}
+
+module_init(mlx4_init);
+module_exit(mlx4_cleanup);
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
new file mode 100644
index 00000000000..672024a0ee7
--- /dev/null
+++ b/drivers/net/mlx4/mcg.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_QP_PER_MGM];
+};
+
+static const u8 zero_gid[16]; /* automatically initialized to 0 */
+
+static int mlx4_READ_MCG(struct mlx4_dev *dev, int index,
+ struct mlx4_cmd_mailbox *mailbox)
+{
+ return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index,
+ struct mlx4_cmd_mailbox *mailbox)
+{
+ return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ u16 *hash)
+{
+ u64 imm;
+ int err;
+
+ err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH,
+ MLX4_CMD_TIME_CLASS_A);
+
+ if (!err)
+ *hash = imm;
+
+ return err;
+}
+
+/*
+ * Caller must hold MCG table mutex. gid and mgm parameters must
+ * be properly aligned for command interface.
+ *
+ * Returns 0 unless a firmware command error occurs.
+ *
+ * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
+ * and *mgm holds MGM entry.
+ *
+ * If GID is found in AMGM, *index = index in AMGM, *prev = index of
+ * previous entry in hash chain and *mgm holds AMGM entry.
+ *
+ * If no AMGM exists for given gid, *index = -1, *prev = index of last
+ * entry in hash chain and *mgm holds end of hash chain.
+ */
+static int find_mgm(struct mlx4_dev *dev,
+ u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
+ u16 *hash, int *prev, int *index)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm = mgm_mailbox->buf;
+ u8 *mgid;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ mgid = mailbox->buf;
+
+ memcpy(mgid, gid, 16);
+
+ err = mlx4_MGID_HASH(dev, mailbox, hash);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err)
+ return err;
+
+ if (0)  /* debug output, intentionally compiled out */
+ mlx4_dbg(dev, "Hash for %04x:%04x:%04x:%04x:"
+ "%04x:%04x:%04x:%04x is %04x\n",
+ be16_to_cpu(((__be16 *) gid)[0]),
+ be16_to_cpu(((__be16 *) gid)[1]),
+ be16_to_cpu(((__be16 *) gid)[2]),
+ be16_to_cpu(((__be16 *) gid)[3]),
+ be16_to_cpu(((__be16 *) gid)[4]),
+ be16_to_cpu(((__be16 *) gid)[5]),
+ be16_to_cpu(((__be16 *) gid)[6]),
+ be16_to_cpu(((__be16 *) gid)[7]),
+ *hash);
+
+ *index = *hash;
+ *prev = -1;
+
+ do {
+ err = mlx4_READ_MCG(dev, *index, mgm_mailbox);
+ if (err)
+ return err;
+
+ if (!memcmp(mgm->gid, zero_gid, 16)) {
+ if (*index != *hash) {
+ mlx4_err(dev, "Found zero MGID in AMGM.\n");
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (!memcmp(mgm->gid, gid, 16))
+ return err;
+
+ *prev = *index;
+ *index = be32_to_cpu(mgm->next_gid_index) >> 6;
+ } while (*index);
+
+ *index = -1;
+ return err;
+}
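
The << 6 / >> 6 pairs in this file come from MLX4_MGM_ENTRY_SIZE being 0x40 = 64 bytes: next_gid_index holds the link to the next hash-chain entry shifted left by six bits (the low bits are not part of the index), so an AMGM entry at index 5 is stored as 5 << 6 = 0x140 and recovered as 0x140 >> 6. A stored value of 0 terminates the chain, which is unambiguous because AMGM indices start at dev->caps.num_mgms and are therefore never zero.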
+
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 members_count;
+ u16 hash;
+ int index, prev;
+ int link = 0;
+ int i;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ mgm = mailbox->buf;
+
+ mutex_lock(&priv->mcg_table.mutex);
+
+ err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+ if (err)
+ goto out;
+
+ if (index != -1) {
+ if (!memcmp(mgm->gid, zero_gid, 16))
+ memcpy(mgm->gid, gid, 16);
+ } else {
+ link = 1;
+
+ index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
+ if (index == -1) {
+ mlx4_err(dev, "No AMGM entries left\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ index += dev->caps.num_mgms;
+
+ err = mlx4_READ_MCG(dev, index, mailbox);
+ if (err)
+ goto out;
+
+ memset(mgm, 0, sizeof *mgm);
+ memcpy(mgm->gid, gid, 16);
+ }
+
+ members_count = be32_to_cpu(mgm->members_count);
+ if (members_count == MLX4_QP_PER_MGM) {
+ mlx4_err(dev, "MGM at index %x is full.\n", index);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < members_count; ++i)
+ if (mgm->qp[i] == cpu_to_be32(qp->qpn)) {
+ mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
+ err = 0;
+ goto out;
+ }
+
+ mgm->qp[members_count++] = cpu_to_be32(qp->qpn);
+ mgm->members_count = cpu_to_be32(members_count);
+
+ err = mlx4_WRITE_MCG(dev, index, mailbox);
+ if (err)
+ goto out;
+
+ if (!link)
+ goto out;
+
+ err = mlx4_READ_MCG(dev, prev, mailbox);
+ if (err)
+ goto out;
+
+ mgm->next_gid_index = cpu_to_be32(index << 6);
+
+ err = mlx4_WRITE_MCG(dev, prev, mailbox);
+ if (err)
+ goto out;
+
+out:
+ if (err && link && index != -1) {
+ if (index < dev->caps.num_mgms)
+ mlx4_warn(dev, "Got AMGM index %d < %d",
+ index, dev->caps.num_mgms);
+ else
+ mlx4_bitmap_free(&priv->mcg_table.bitmap,
+ index - dev->caps.num_mgms);
+ }
+ mutex_unlock(&priv->mcg_table.mutex);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
+
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 members_count;
+ u16 hash;
+ int prev, index;
+ int i, loc;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ mgm = mailbox->buf;
+
+ mutex_lock(&priv->mcg_table.mutex);
+
+ err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+ if (err)
+ goto out;
+
+ if (index == -1) {
+ mlx4_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
+ "not found\n",
+ be16_to_cpu(((__be16 *) gid)[0]),
+ be16_to_cpu(((__be16 *) gid)[1]),
+ be16_to_cpu(((__be16 *) gid)[2]),
+ be16_to_cpu(((__be16 *) gid)[3]),
+ be16_to_cpu(((__be16 *) gid)[4]),
+ be16_to_cpu(((__be16 *) gid)[5]),
+ be16_to_cpu(((__be16 *) gid)[6]),
+ be16_to_cpu(((__be16 *) gid)[7]));
+ err = -EINVAL;
+ goto out;
+ }
+
+ members_count = be32_to_cpu(mgm->members_count);
+ for (loc = -1, i = 0; i < members_count; ++i)
+ if (mgm->qp[i] == cpu_to_be32(qp->qpn))
+ loc = i;
+
+ if (loc == -1) {
+ mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mgm->members_count = cpu_to_be32(--members_count);
+ mgm->qp[loc] = mgm->qp[i - 1];
+ mgm->qp[i - 1] = 0;
+
+ err = mlx4_WRITE_MCG(dev, index, mailbox);
+ if (err)
+ goto out;
+
+ if (i != 1)
+ goto out;
+
+ if (prev == -1) {
+ /* Remove entry from MGM */
+ int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
+ if (amgm_index) {
+ err = mlx4_READ_MCG(dev, amgm_index, mailbox);
+ if (err)
+ goto out;
+ } else
+ memset(mgm->gid, 0, 16);
+
+ err = mlx4_WRITE_MCG(dev, index, mailbox);
+ if (err)
+ goto out;
+
+ if (amgm_index) {
+ if (amgm_index < dev->caps.num_mgms)
+ mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+ index, amgm_index, dev->caps.num_mgms);
+ else
+ mlx4_bitmap_free(&priv->mcg_table.bitmap,
+ amgm_index - dev->caps.num_mgms);
+ }
+ } else {
+ /* Remove entry from AMGM */
+ int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
+ err = mlx4_READ_MCG(dev, prev, mailbox);
+ if (err)
+ goto out;
+
+ mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
+
+ err = mlx4_WRITE_MCG(dev, prev, mailbox);
+ if (err)
+ goto out;
+
+ if (index < dev->caps.num_mgms)
+ mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+ prev, index, dev->caps.num_mgms);
+ else
+ mlx4_bitmap_free(&priv->mcg_table.bitmap,
+ index - dev->caps.num_mgms);
+ }
+
+out:
+ mutex_unlock(&priv->mcg_table.mutex);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+
+int __devinit mlx4_init_mcg_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ err = mlx4_bitmap_init(&priv->mcg_table.bitmap,
+ dev->caps.num_amgms, dev->caps.num_amgms - 1, 0);
+ if (err)
+ return err;
+
+ mutex_init(&priv->mcg_table.mutex);
+
+ return 0;
+}
+
+void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
+}
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
new file mode 100644
index 00000000000..9befbae3d19
--- /dev/null
+++ b/drivers/net/mlx4/mlx4.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_H
+#define MLX4_H
+
+#include <linux/radix-tree.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/doorbell.h>
+
+#define DRV_NAME "mlx4_core"
+#define PFX DRV_NAME ": "
+#define DRV_VERSION "0.01"
+#define DRV_RELDATE "May 1, 2007"
+
+enum {
+ MLX4_HCR_BASE = 0x80680,
+ MLX4_HCR_SIZE = 0x0001c,
+ MLX4_CLR_INT_SIZE = 0x00008
+};
+
+enum {
+ MLX4_BOARD_ID_LEN = 64
+};
+
+enum {
+ MLX4_MGM_ENTRY_SIZE = 0x40,
+ MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_MTT_ENTRY_PER_SEG = 8
+};
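
The MLX4_QP_PER_MGM expression is easiest to read as a capacity calculation: a 64-byte MGM entry spends 16 bytes on the next_gid_index/members_count header and 16 bytes on the GID (see struct mlx4_mgm in mcg.c above), leaving 64 - 32 = 32 bytes, i.e. 4 * (64/16 - 2) = 8 32-bit QP numbers per entry.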
+
+enum {
+ MLX4_EQ_ASYNC,
+ MLX4_EQ_COMP,
+ MLX4_EQ_CATAS,
+ MLX4_NUM_EQ
+};
+
+enum {
+ MLX4_NUM_PDS = 1 << 15
+};
+
+enum {
+ MLX4_CMPT_TYPE_QP = 0,
+ MLX4_CMPT_TYPE_SRQ = 1,
+ MLX4_CMPT_TYPE_CQ = 2,
+ MLX4_CMPT_TYPE_EQ = 3,
+ MLX4_CMPT_NUM_TYPE
+};
+
+enum {
+ MLX4_CMPT_SHIFT = 24,
+ MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
+};
+
+#ifdef CONFIG_MLX4_DEBUG
+extern int mlx4_debug_level;
+
+#define mlx4_dbg(mdev, format, arg...) \
+ do { \
+ if (mlx4_debug_level) \
+ dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
+ } while (0)
+
+#else /* CONFIG_MLX4_DEBUG */
+
+#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
+
+#endif /* CONFIG_MLX4_DEBUG */
+
+#define mlx4_err(mdev, format, arg...) \
+ dev_err(&mdev->pdev->dev, format, ## arg)
+#define mlx4_info(mdev, format, arg...) \
+ dev_info(&mdev->pdev->dev, format, ## arg)
+#define mlx4_warn(mdev, format, arg...) \
+ dev_warn(&mdev->pdev->dev, format, ## arg)
+
+struct mlx4_bitmap {
+ u32 last;
+ u32 top;
+ u32 max;
+ u32 mask;
+ spinlock_t lock;
+ unsigned long *table;
+};
+
+struct mlx4_buddy {
+ unsigned long **bits;
+ int max_order;
+ spinlock_t lock;
+};
+
+struct mlx4_icm;
+
+struct mlx4_icm_table {
+ u64 virt;
+ int num_icm;
+ int num_obj;
+ int obj_size;
+ int lowmem;
+ struct mutex mutex;
+ struct mlx4_icm **icm;
+};
+
+struct mlx4_eq {
+ struct mlx4_dev *dev;
+ void __iomem *doorbell;
+ int eqn;
+ u32 cons_index;
+ u16 irq;
+ u16 have_irq;
+ int nent;
+ struct mlx4_buf_list *page_list;
+ struct mlx4_mtt mtt;
+};
+
+struct mlx4_profile {
+ int num_qp;
+ int rdmarc_per_qp;
+ int num_srq;
+ int num_cq;
+ int num_mcg;
+ int num_mpt;
+ int num_mtt;
+};
+
+struct mlx4_fw {
+ u64 clr_int_base;
+ u64 catas_offset;
+ struct mlx4_icm *fw_icm;
+ struct mlx4_icm *aux_icm;
+ u32 catas_size;
+ u16 fw_pages;
+ u8 clr_int_bar;
+ u8 catas_bar;
+};
+
+struct mlx4_cmd {
+ struct pci_pool *pool;
+ void __iomem *hcr;
+ struct mutex hcr_mutex;
+ struct semaphore poll_sem;
+ struct semaphore event_sem;
+ int max_cmds;
+ spinlock_t context_lock;
+ int free_head;
+ struct mlx4_cmd_context *context;
+ u16 token_mask;
+ u8 use_events;
+ u8 toggle;
+};
+
+struct mlx4_uar_table {
+ struct mlx4_bitmap bitmap;
+};
+
+struct mlx4_mr_table {
+ struct mlx4_bitmap mpt_bitmap;
+ struct mlx4_buddy mtt_buddy;
+ u64 mtt_base;
+ u64 mpt_base;
+ struct mlx4_icm_table mtt_table;
+ struct mlx4_icm_table dmpt_table;
+};
+
+struct mlx4_cq_table {
+ struct mlx4_bitmap bitmap;
+ spinlock_t lock;
+ struct radix_tree_root tree;
+ struct mlx4_icm_table table;
+ struct mlx4_icm_table cmpt_table;
+};
+
+struct mlx4_eq_table {
+ struct mlx4_bitmap bitmap;
+ void __iomem *clr_int;
+ void __iomem *uar_map[(MLX4_NUM_EQ + 6) / 4];
+ u32 clr_mask;
+ struct mlx4_eq eq[MLX4_NUM_EQ];
+ u64 icm_virt;
+ struct page *icm_page;
+ dma_addr_t icm_dma;
+ struct mlx4_icm_table cmpt_table;
+ int have_irq;
+ u8 inta_pin;
+};
+
+struct mlx4_srq_table {
+ struct mlx4_bitmap bitmap;
+ spinlock_t lock;
+ struct radix_tree_root tree;
+ struct mlx4_icm_table table;
+ struct mlx4_icm_table cmpt_table;
+};
+
+struct mlx4_qp_table {
+ struct mlx4_bitmap bitmap;
+ u32 rdmarc_base;
+ int rdmarc_shift;
+ spinlock_t lock;
+ struct mlx4_icm_table qp_table;
+ struct mlx4_icm_table auxc_table;
+ struct mlx4_icm_table altc_table;
+ struct mlx4_icm_table rdmarc_table;
+ struct mlx4_icm_table cmpt_table;
+};
+
+struct mlx4_mcg_table {
+ struct mutex mutex;
+ struct mlx4_bitmap bitmap;
+ struct mlx4_icm_table table;
+};
+
+struct mlx4_catas_err {
+ u32 __iomem *map;
+ int size;
+};
+
+struct mlx4_priv {
+ struct mlx4_dev dev;
+
+ struct list_head dev_list;
+ struct list_head ctx_list;
+ spinlock_t ctx_lock;
+
+ struct mlx4_fw fw;
+ struct mlx4_cmd cmd;
+
+ struct mlx4_bitmap pd_bitmap;
+ struct mlx4_uar_table uar_table;
+ struct mlx4_mr_table mr_table;
+ struct mlx4_cq_table cq_table;
+ struct mlx4_eq_table eq_table;
+ struct mlx4_srq_table srq_table;
+ struct mlx4_qp_table qp_table;
+ struct mlx4_mcg_table mcg_table;
+
+ struct mlx4_catas_err catas_err;
+
+ void __iomem *clr_base;
+
+ struct mlx4_uar driver_uar;
+ void __iomem *kar;
+ MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock)
+
+ u32 rev_id;
+ char board_id[MLX4_BOARD_ID_LEN];
+};
+
+static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
+{
+ return container_of(dev, struct mlx4_priv, dev);
+}
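
struct mlx4_dev is embedded in (not pointed to by) struct mlx4_priv, so the core can hand &priv->dev to consumers as the public handle and recover its private state with plain pointer arithmetic; mlx4_priv() is container_of() and compiles to a constant offset subtraction. A sketch of the round trip:

    struct mlx4_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
    struct mlx4_dev *dev = &priv->dev;    /* handed to consumers */

    /* ... later, inside any function taking a struct mlx4_dev *: */
    BUG_ON(mlx4_priv(dev) != priv);       /* always true by construction */

Since dev is the first member the offset is in fact zero, but going through container_of() keeps the code correct even if the layout changes.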
+
+u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
+void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
+
+int mlx4_reset(struct mlx4_dev *dev);
+
+int mlx4_init_pd_table(struct mlx4_dev *dev);
+int mlx4_init_uar_table(struct mlx4_dev *dev);
+int mlx4_init_mr_table(struct mlx4_dev *dev);
+int mlx4_init_eq_table(struct mlx4_dev *dev);
+int mlx4_init_cq_table(struct mlx4_dev *dev);
+int mlx4_init_qp_table(struct mlx4_dev *dev);
+int mlx4_init_srq_table(struct mlx4_dev *dev);
+int mlx4_init_mcg_table(struct mlx4_dev *dev);
+
+void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
+void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
+void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
+void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
+void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+
+void mlx4_map_catas_buf(struct mlx4_dev *dev);
+void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
+
+int mlx4_register_device(struct mlx4_dev *dev);
+void mlx4_unregister_device(struct mlx4_dev *dev);
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
+ int subtype, int port);
+
+struct mlx4_dev_cap;
+struct mlx4_init_hca_param;
+
+u64 mlx4_make_profile(struct mlx4_dev *dev,
+ struct mlx4_profile *request,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *init_hca);
+
+int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
+void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
+
+int mlx4_cmd_init(struct mlx4_dev *dev);
+void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
+int mlx4_cmd_use_events(struct mlx4_dev *dev);
+void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+
+void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
+void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
+
+void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
+
+void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
+
+void mlx4_handle_catas_err(struct mlx4_dev *dev);
+
+#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
new file mode 100644
index 00000000000..b33864dab17
--- /dev/null
+++ b/drivers/net/mlx4/mr.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+ __be32 flags;
+ __be32 qpn;
+ __be32 key;
+ __be32 pd;
+ __be64 start;
+ __be64 length;
+ __be32 lkey;
+ __be32 win_cnt;
+ u8 reserved1[3];
+ u8 mtt_rep;
+ __be64 mtt_seg;
+ __be32 mtt_sz;
+ __be32 entity_size;
+ __be32 first_byte_offset;
+} __attribute__((packed));
+
+#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
+#define MLX4_MPT_FLAG_MIO (1 << 17)
+#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
+#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
+#define MLX4_MPT_FLAG_REGION (1 << 8)
+
+#define MLX4_MTT_FLAG_PRESENT 1
+
+static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
+{
+ int o;
+ int m;
+ u32 seg;
+
+ spin_lock(&buddy->lock);
+
+ for (o = order; o <= buddy->max_order; ++o) {
+ m = 1 << (buddy->max_order - o);
+ seg = find_first_bit(buddy->bits[o], m);
+ if (seg < m)
+ goto found;
+ }
+
+ spin_unlock(&buddy->lock);
+ return -1;
+
+ found:
+ clear_bit(seg, buddy->bits[o]);
+
+ while (o > order) {
+ --o;
+ seg <<= 1;
+ set_bit(seg ^ 1, buddy->bits[o]);
+ }
+
+ spin_unlock(&buddy->lock);
+
+ seg <<= order;
+
+ return seg;
+}
+
+static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
+{
+ seg >>= order;
+
+ spin_lock(&buddy->lock);
+
+ while (test_bit(seg ^ 1, buddy->bits[order])) {
+ clear_bit(seg ^ 1, buddy->bits[order]);
+ seg >>= 1;
+ ++order;
+ }
+
+ set_bit(seg, buddy->bits[order]);
+
+ spin_unlock(&buddy->lock);
+}
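
bits[o] marks which order-o blocks are free, and a block's buddy at the same order is always seg ^ 1. A short trace, assuming max_order = 2 with only the single order-2 block free initially: mlx4_buddy_alloc(order = 0) finds nothing at orders 0 and 1, takes block 0 at order 2, splits twice on the way down (setting bit 1 at order 1 and bit 1 at order 0), and returns segment 0. mlx4_buddy_free(0, 0) then sees the order-0 buddy free, coalesces, sees the order-1 buddy free, coalesces again, and restores the single order-2 block; allocation and free are exact inverses.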
+
+static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
+{
+ int i, s;
+
+ buddy->max_order = max_order;
+ spin_lock_init(&buddy->lock);
+
+ buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
+ GFP_KERNEL);
+ if (!buddy->bits)
+ goto err_out;
+
+ for (i = 0; i <= buddy->max_order; ++i) {
+ s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+ buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
+ if (!buddy->bits[i])
+ goto err_out_free;
+ bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+ }
+
+ set_bit(0, buddy->bits[buddy->max_order]);
+
+ return 0;
+
+err_out_free:
+ for (i = 0; i <= buddy->max_order; ++i)
+ kfree(buddy->bits[i]);
+
+ kfree(buddy->bits);
+
+err_out:
+ return -ENOMEM;
+}
+
+static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
+{
+ int i;
+
+ for (i = 0; i <= buddy->max_order; ++i)
+ kfree(buddy->bits[i]);
+
+ kfree(buddy->bits);
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ u32 seg;
+
+ seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+ if (seg == -1)
+ return -1;
+
+ if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
+ seg + (1 << order) - 1)) {
+ mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+ return -1;
+ }
+
+ return seg;
+}
+
+int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
+ struct mlx4_mtt *mtt)
+{
+ int i;
+
+ if (!npages) {
+ mtt->order = -1;
+ mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
+ return 0;
+ } else {
+ mtt->page_shift = page_shift;
+ }
+
+ for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+ ++mtt->order;
+
+ mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
+ if (mtt->first_seg == -1)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_init);
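
The order loop computes the smallest order such that MLX4_MTT_ENTRY_PER_SEG << order >= npages. For npages = 100, i steps through 8, 16, 32, 64, 128, bumping order to 4, so 1 << 4 = 16 segments (128 MTT entries) back a 100-page region; up to nearly half the reserved entries can go unused, the usual cost of a buddy allocator.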
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ if (mtt->order < 0)
+ return;
+
+ mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
+ mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
+
+u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
+ return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
+
+static u32 hw_index_to_key(u32 ind)
+{
+ return (ind >> 24) | (ind << 8);
+}
+
+static u32 key_to_hw_index(u32 key)
+{
+ return (key << 24) | (key >> 8);
+}
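
These two helpers are mutually inverse 32-bit rotations by one byte, keeping the consumer-visible memory key distinct from the raw MPT table index. Worked example for index 0x12: hw_index_to_key(0x12) = (0x12 >> 24) | (0x12 << 8) = 0x1200, and key_to_hw_index(0x1200) = (0x1200 << 24) | (0x1200 >> 8) = 0 | 0x12 = 0x12 (the left term truncates to zero in 32 bits).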
+
+static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int mpt_index)
+{
+ return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int mpt_index)
+{
+ return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
+ !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ int npages, int page_shift, struct mlx4_mr *mr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 index;
+ int err;
+
+ index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+ if (index == -1) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ mr->iova = iova;
+ mr->size = size;
+ mr->pd = pd;
+ mr->access = access;
+ mr->enabled = 0;
+ mr->key = hw_index_to_key(index);
+
+ err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+ if (err)
+ goto err_index;
+
+ return 0;
+
+err_index:
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+
+err:
+ return err; /* mr is caller-owned; do not free it here */
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ if (mr->enabled) {
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(mr->key) &
+ (dev->caps.num_mpts - 1));
+ if (err)
+ mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
+ }
+
+ mlx4_mtt_cleanup(dev, &mr->mtt);
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free);
+
+int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mpt_entry *mpt_entry;
+ int err;
+
+ err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ if (err)
+ return err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_table;
+ }
+ mpt_entry = mailbox->buf;
+
+ memset(mpt_entry, 0, sizeof *mpt_entry);
+
+ mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS |
+ MLX4_MPT_FLAG_MIO |
+ MLX4_MPT_FLAG_REGION |
+ mr->access);
+ if (mr->mtt.order < 0)
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
+
+ mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
+ mpt_entry->pd = cpu_to_be32(mr->pd);
+ mpt_entry->start = cpu_to_be64(mr->iova);
+ mpt_entry->length = cpu_to_be64(mr->size);
+ mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+ mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+
+ err = mlx4_SW2HW_MPT(dev, mailbox,
+ key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
+ if (err) {
+ mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
+ goto err_cmd;
+ }
+
+ mr->enabled = 1;
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return 0;
+
+err_cmd:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_table:
+ mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_enable);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int num_mtt)
+{
+ return mlx4_cmd(dev, mailbox->dma, num_mtt, 0, MLX4_CMD_WRITE_MTT,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be64 *mtt_entry;
+ int i;
+ int err = 0;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ mtt_entry = mailbox->buf;
+
+ while (npages > 0) {
+ mtt_entry[0] = cpu_to_be64(mlx4_mtt_addr(dev, mtt) + start_index * 8);
+ mtt_entry[1] = 0;
+
+ for (i = 0; i < npages && i < MLX4_MAILBOX_SIZE / 8 - 2; ++i)
+ mtt_entry[i + 2] = cpu_to_be64(page_list[i] |
+ MLX4_MTT_FLAG_PRESENT);
+
+ /*
+ * If we have an odd number of entries to write, add
+ * one more dummy entry for firmware efficiency.
+ */
+ if (i & 1)
+ mtt_entry[i + 2] = 0;
+
+ err = mlx4_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
+ if (err)
+ goto out;
+
+ npages -= i;
+ start_index += i;
+ page_list += i;
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_write_mtt);
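
Each WRITE_MTT mailbox starts with a quadword holding the MTT address being written (plus one reserved quadword), followed by up to MLX4_MAILBOX_SIZE/8 - 2 page entries, so the while loop transparently chunks large regions. A minimal hypothetical caller, with pages[] standing in for real DMA addresses:

    struct mlx4_mtt mtt;
    u64 pages[4];    /* filled in with page-aligned DMA addresses */
    int err;

    err = mlx4_mtt_init(dev, 4, PAGE_SHIFT, &mtt);
    if (err)
        return err;
    err = mlx4_write_mtt(dev, &mtt, 0, 4, pages);
    if (err)
        mlx4_mtt_cleanup(dev, &mtt);

mlx4_buf_write_mtt() below is exactly this pattern for buffers allocated through the mlx4_buf helpers.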
+
+int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_buf *buf)
+{
+ u64 *page_list;
+ int err;
+ int i;
+
+ page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->npages; ++i)
+ if (buf->nbufs == 1)
+ page_list[i] = buf->u.direct.map + (i << buf->page_shift);
+ else
+ page_list[i] = buf->u.page_list[i].map;
+
+ err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
+
+ kfree(page_list);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
+
+int __devinit mlx4_init_mr_table(struct mlx4_dev *dev)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ int err;
+
+ err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
+ ~0, dev->caps.reserved_mrws);
+ if (err)
+ return err;
+
+ err = mlx4_buddy_init(&mr_table->mtt_buddy,
+ ilog2(dev->caps.num_mtt_segs));
+ if (err)
+ goto err_buddy;
+
+ if (dev->caps.reserved_mtts) {
+ if (mlx4_alloc_mtt_range(dev, ilog2(dev->caps.reserved_mtts)) == -1) {
+ mlx4_warn(dev, "MTT table of order %d is too small.\n",
+ mr_table->mtt_buddy.max_order);
+ err = -ENOMEM;
+ goto err_reserve_mtts;
+ }
+ }
+
+ return 0;
+
+err_reserve_mtts:
+ mlx4_buddy_cleanup(&mr_table->mtt_buddy);
+
+err_buddy:
+ mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
+
+ return err;
+}
+
+void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ mlx4_buddy_cleanup(&mr_table->mtt_buddy);
+ mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
+}
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
new file mode 100644
index 00000000000..23dea1ee775
--- /dev/null
+++ b/drivers/net/mlx4/pd.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+
+#include <asm/page.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
+ if (*pdn == -1)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
+
+void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
+{
+ mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
+}
+EXPORT_SYMBOL_GPL(mlx4_pd_free);
+
+int __devinit mlx4_init_pd_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
+ (1 << 24) - 1, dev->caps.reserved_pds);
+}
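
The (1 << 24) - 1 mask given to mlx4_bitmap_init() presumably reflects that PD numbers are carried in 24-bit fields of the hardware's context entries (cf. the __be32 pd member of struct mlx4_mpt_entry in mr.c), even though only MLX4_NUM_PDS = 1 << 15 of them are ever allocated.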
+
+void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
+}
+
+int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
+{
+ uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
+ if (uar->index == -1)
+ return -ENOMEM;
+
+ uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
+
+void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
+{
+ mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
+}
+EXPORT_SYMBOL_GPL(mlx4_uar_free);
+
+int mlx4_init_uar_table(struct mlx4_dev *dev)
+{
+ return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
+ dev->caps.num_uars, dev->caps.num_uars - 1,
+ max(128, dev->caps.reserved_uars));
+}
+
+void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
+}
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
new file mode 100644
index 00000000000..9ca42b213d5
--- /dev/null
+++ b/drivers/net/mlx4/profile.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+enum {
+ MLX4_RES_QP,
+ MLX4_RES_RDMARC,
+ MLX4_RES_ALTC,
+ MLX4_RES_AUXC,
+ MLX4_RES_SRQ,
+ MLX4_RES_CQ,
+ MLX4_RES_EQ,
+ MLX4_RES_DMPT,
+ MLX4_RES_CMPT,
+ MLX4_RES_MTT,
+ MLX4_RES_MCG,
+ MLX4_RES_NUM
+};
+
+static const char *res_name[] = {
+ [MLX4_RES_QP] = "QP",
+ [MLX4_RES_RDMARC] = "RDMARC",
+ [MLX4_RES_ALTC] = "ALTC",
+ [MLX4_RES_AUXC] = "AUXC",
+ [MLX4_RES_SRQ] = "SRQ",
+ [MLX4_RES_CQ] = "CQ",
+ [MLX4_RES_EQ] = "EQ",
+ [MLX4_RES_DMPT] = "DMPT",
+ [MLX4_RES_CMPT] = "CMPT",
+ [MLX4_RES_MTT] = "MTT",
+ [MLX4_RES_MCG] = "MCG",
+};
+
+u64 mlx4_make_profile(struct mlx4_dev *dev,
+ struct mlx4_profile *request,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *init_hca)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource {
+ u64 size;
+ u64 start;
+ int type;
+ int num;
+ int log_num;
+ };
+
+ u64 total_size = 0;
+ struct mlx4_resource *profile;
+ struct mlx4_resource tmp;
+ int i, j;
+
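+ /*
+ * Note: errors come back from this u64-returning function as negative
+ * errno values cast to u64; the caller is presumably expected to test
+ * for that (e.g. by casting the result to a signed 64-bit type).
+ */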
+ profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
+ profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
+ profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz;
+ profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz;
+ profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz;
+ profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz;
+ profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
+ profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
+ profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
+ profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+ profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
+
+ profile[MLX4_RES_QP].num = request->num_qp;
+ profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
+ profile[MLX4_RES_ALTC].num = request->num_qp;
+ profile[MLX4_RES_AUXC].num = request->num_qp;
+ profile[MLX4_RES_SRQ].num = request->num_srq;
+ profile[MLX4_RES_CQ].num = request->num_cq;
+ profile[MLX4_RES_EQ].num = MLX4_NUM_EQ + dev_cap->reserved_eqs;
+ profile[MLX4_RES_DMPT].num = request->num_mpt;
+ profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
+ profile[MLX4_RES_MTT].num = request->num_mtt;
+ profile[MLX4_RES_MCG].num = request->num_mcg;
+
+ for (i = 0; i < MLX4_RES_NUM; ++i) {
+ profile[i].type = i;
+ profile[i].num = roundup_pow_of_two(profile[i].num);
+ profile[i].log_num = ilog2(profile[i].num);
+ profile[i].size *= profile[i].num;
+ profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
+ }
+
+ /*
+ * Sort the resources in decreasing order of size. Since they
+ * all have sizes that are powers of 2, we'll be able to keep
+ * resources aligned to their size and pack them without gaps
+ * using the sorted order.
+ */
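+ /*
+ * Worked example with illustrative numbers (not from the driver):
+ * sizes of 8, 2 and 4 units sort to 8, 4, 2 and land at offsets
+ * 0, 8 and 12; each offset is a multiple of the entry's size, so
+ * natural alignment falls out with no padding.
+ */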
+ for (i = MLX4_RES_NUM; i > 0; --i)
+ for (j = 1; j < i; ++j) {
+ if (profile[j].size > profile[j - 1].size) {
+ tmp = profile[j];
+ profile[j] = profile[j - 1];
+ profile[j - 1] = tmp;
+ }
+ }
+
+ for (i = 0; i < MLX4_RES_NUM; ++i) {
+ if (profile[i].size) {
+ profile[i].start = total_size;
+ total_size += profile[i].size;
+ }
+
+ if (total_size > dev_cap->max_icm_sz) {
+ mlx4_err(dev, "Profile requires 0x%llx bytes; "
+ "won't fit in 0x%llx bytes of context memory.\n",
+ (unsigned long long) total_size,
+ (unsigned long long) dev_cap->max_icm_sz);
+ kfree(profile);
+ return -ENOMEM;
+ }
+
+ if (profile[i].size)
+ mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
+ "size 0x%10llx\n",
+ i, res_name[profile[i].type], profile[i].log_num,
+ (unsigned long long) profile[i].start,
+ (unsigned long long) profile[i].size);
+ }
+
+ mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
+ (int) (total_size >> 10));
+
+ for (i = 0; i < MLX4_RES_NUM; ++i) {
+ switch (profile[i].type) {
+ case MLX4_RES_QP:
+ dev->caps.num_qps = profile[i].num;
+ init_hca->qpc_base = profile[i].start;
+ init_hca->log_num_qps = profile[i].log_num;
+ break;
+ case MLX4_RES_RDMARC:
+ for (priv->qp_table.rdmarc_shift = 0;
+ request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
+ ++priv->qp_table.rdmarc_shift)
+ ; /* nothing */
+ dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
+ priv->qp_table.rdmarc_base = (u32) profile[i].start;
+ init_hca->rdmarc_base = profile[i].start;
+ init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;
+ break;
+ case MLX4_RES_ALTC:
+ init_hca->altc_base = profile[i].start;
+ break;
+ case MLX4_RES_AUXC:
+ init_hca->auxc_base = profile[i].start;
+ break;
+ case MLX4_RES_SRQ:
+ dev->caps.num_srqs = profile[i].num;
+ init_hca->srqc_base = profile[i].start;
+ init_hca->log_num_srqs = profile[i].log_num;
+ break;
+ case MLX4_RES_CQ:
+ dev->caps.num_cqs = profile[i].num;
+ init_hca->cqc_base = profile[i].start;
+ init_hca->log_num_cqs = profile[i].log_num;
+ break;
+ case MLX4_RES_EQ:
+ dev->caps.num_eqs = profile[i].num;
+ init_hca->eqc_base = profile[i].start;
+ init_hca->log_num_eqs = profile[i].log_num;
+ break;
+ case MLX4_RES_DMPT:
+ dev->caps.num_mpts = profile[i].num;
+ priv->mr_table.mpt_base = profile[i].start;
+ init_hca->dmpt_base = profile[i].start;
+ init_hca->log_mpt_sz = profile[i].log_num;
+ break;
+ case MLX4_RES_CMPT:
+ init_hca->cmpt_base = profile[i].start;
+ break;
+ case MLX4_RES_MTT:
+ dev->caps.num_mtt_segs = profile[i].num;
+ priv->mr_table.mtt_base = profile[i].start;
+ init_hca->mtt_base = profile[i].start;
+ break;
+ case MLX4_RES_MCG:
+ dev->caps.num_mgms = profile[i].num >> 1;
+ dev->caps.num_amgms = profile[i].num >> 1;
+ init_hca->mc_base = profile[i].start;
+ init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+ init_hca->log_mc_table_sz = profile[i].log_num;
+ init_hca->log_mc_hash_sz = profile[i].log_num - 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * PDs don't take any HCA memory, but we assign them as part
+ * of the HCA profile anyway.
+ */
+ dev->caps.num_pds = MLX4_NUM_PDS;
+
+ kfree(profile);
+ return total_size;
+}
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
new file mode 100644
index 00000000000..7f8b7d55b6e
--- /dev/null
+++ b/drivers/net/mlx4/qp.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+ if (qp)
+ atomic_inc(&qp->refcount);
+
+ spin_unlock(&qp_table->lock);
+
+ if (!qp) {
+ mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ return;
+ }
+
+ qp->event(qp, event_type);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
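+
+/*
+ * Note on the refcount/completion handshake above: the event path takes
+ * a reference under the lock before invoking qp->event(), and
+ * mlx4_qp_free() drops the initial reference and then waits on qp->free
+ * until every in-flight event handler has dropped its own.
+ */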
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp)
+{
+ static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
+ [MLX4_QP_STATE_RST] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP,
+ },
+ [MLX4_QP_STATE_INIT] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP,
+ [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP,
+ },
+ [MLX4_QP_STATE_RTR] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP,
+ },
+ [MLX4_QP_STATE_RTS] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP,
+ [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP,
+ },
+ [MLX4_QP_STATE_SQD] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP,
+ [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP,
+ },
+ [MLX4_QP_STATE_SQER] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP,
+ },
+ [MLX4_QP_STATE_ERR] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ }
+ };
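+
+ /*
+ * Sketch of how the table is used: an INIT -> RTR transition looks up
+ * op[MLX4_QP_STATE_INIT][MLX4_QP_STATE_RTR] and issues
+ * MLX4_CMD_INIT2RTR_QP; a zero entry marks an invalid transition,
+ * which the check below rejects with -EINVAL.
+ */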
+
+ struct mlx4_cmd_mailbox *mailbox;
+ int ret = 0;
+
+ if (cur_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||
+ new_state < 0 || new_state >= MLX4_QP_NUM_STATE ||
+ !op[cur_state][new_state])
+ return -EINVAL;
+
+ if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
+ return mlx4_cmd(dev, 0, qp->qpn, 2,
+ MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
+ u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
+ context->mtt_base_addr_h = mtt_addr >> 32;
+ context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+ context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+ }
+
+ *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
+ memcpy(mailbox->buf + 8, context, sizeof *context);
+
+ ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
+ cpu_to_be32(qp->qpn);
+
+ ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+ new_state == MLX4_QP_STATE_RST ? 2 : 0,
+ op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_modify);
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int err;
+
+ if (sqpn)
+ qp->qpn = sqpn;
+ else {
+ qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
+ if (qp->qpn == -1)
+ return -ENOMEM;
+ }
+
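+ /*
+ * Pin the ICM backing for this QPN in each context table the QP
+ * needs; every successful get is undone on the error path below.
+ */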
+ err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+ if (err)
+ goto err_put_qp;
+
+ err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+ if (err)
+ goto err_put_auxc;
+
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+ if (err)
+ goto err_put_altc;
+
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+ if (err)
+ goto err_put_rdmarc;
+
+ spin_lock_irq(&qp_table->lock);
+ err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (err)
+ goto err_put_cmpt;
+
+ atomic_set(&qp->refcount, 1);
+ init_completion(&qp->free);
+
+ return 0;
+
+err_put_cmpt:
+ mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
+
+err_put_rdmarc:
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+
+err_put_altc:
+ mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+
+err_put_auxc:
+ mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+
+err_put_qp:
+ mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+
+err_out:
+ if (!sqpn)
+ mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
+
+void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp_table->lock, flags);
+ radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
+ spin_unlock_irqrestore(&qp_table->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_remove);
+
+void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+
+ if (atomic_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+ wait_for_completion(&qp->free);
+
+ mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+
+ mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_free);
+
+static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
+{
+ return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+int __devinit mlx4_init_qp_table(struct mlx4_dev *dev)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ int err;
+
+ spin_lock_init(&qp_table->lock);
+ INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+
+ /*
+ * We reserve 2 extra QPs per port for the special QPs. The
+ * block of special QPs must be aligned to a multiple of 8, so
+ * round up.
+ */
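+ /*
+ * E.g. (illustrative numbers): with 10 reserved QPs, ALIGN(10, 8)
+ * puts sqp_start at 16, and the bitmap below keeps QPNs 0..23 (the
+ * reserved range plus the 8 special QPs) out of general allocation.
+ */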
+ dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
+ err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
+ (1 << 24) - 1, dev->caps.sqp_start + 8);
+ if (err)
+ return err;
+
+ return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
+}
+
+void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
+{
+ mlx4_CONF_SPECIAL_QP(dev, 0);
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
+}
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
new file mode 100644
index 00000000000..51eef8492e9
--- /dev/null
+++ b/drivers/net/mlx4/reset.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+
+#include "mlx4.h"
+
+int mlx4_reset(struct mlx4_dev *dev)
+{
+ void __iomem *reset;
+ u32 *hca_header = NULL;
+ int pcie_cap;
+ u16 devctl;
+ u16 linkctl;
+ u16 vendor;
+ unsigned long end;
+ u32 sem;
+ int i;
+ int err = 0;
+
+#define MLX4_RESET_BASE 0xf0000
+#define MLX4_RESET_SIZE 0x400
+#define MLX4_SEM_OFFSET 0x3fc
+#define MLX4_RESET_OFFSET 0x10
+#define MLX4_RESET_VALUE swab32(1)
+
+#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ)
+#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ)
+
+ /*
+ * Reset the chip. This is somewhat ugly because we have to
+ * save off the PCI header before reset and then restore it
+ * after the chip reboots. We skip config space offsets 22
+ * and 23 since those have a special meaning.
+ */
+
+ /* Do we need to save off the full 4K PCI Express header?? */
+ hca_header = kmalloc(256, GFP_KERNEL);
+ if (!hca_header) {
+ err = -ENOMEM;
+ mlx4_err(dev, "Couldn't allocate memory to save HCA "
+ "PCI header, aborting.\n");
+ goto out;
+ }
+
+ pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
+
+ for (i = 0; i < 64; ++i) {
+ if (i == 22 || i == 23)
+ continue;
+ if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't save HCA "
+ "PCI header, aborting.\n");
+ goto out;
+ }
+ }
+
+ reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
+ MLX4_RESET_SIZE);
+ if (!reset) {
+ err = -ENOMEM;
+ mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+ goto out;
+ }
+
+ /* grab HW semaphore to lock out flash updates */
+ end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
+ do {
+ sem = readl(reset + MLX4_SEM_OFFSET);
+ if (!sem)
+ break;
+
+ msleep(1);
+ } while (time_before(jiffies, end));
+
+ if (sem) {
+ mlx4_err(dev, "Failed to obtain HW semaphore, aborting.\n");
+ err = -EAGAIN;
+ iounmap(reset);
+ goto out;
+ }
+
+ /* actually hit reset */
+ writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
+ iounmap(reset);
+
+ end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
+ do {
+ if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
+ vendor != 0xffff)
+ break;
+
+ msleep(1);
+ } while (time_before(jiffies, end));
+
+ if (vendor == 0xffff) {
+ err = -ENODEV;
+ mlx4_err(dev, "PCI device did not come back after reset, "
+ "aborting.\n");
+ goto out;
+ }
+
+ /* Now restore the PCI headers */
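+ /*
+ * PCI_COMMAND is deliberately restored last (below the loop), so the
+ * device does not start decoding cycles before its BARs and the rest
+ * of the header have been put back.
+ */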
+ if (pcie_cap) {
+ devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
+ if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
+ devctl)) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't restore HCA PCI Express "
+ "Device Control register, aborting.\n");
+ goto out;
+ }
+ linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
+ if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
+ linkctl)) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't restore HCA PCI Express "
+ "Link control register, aborting.\n");
+ goto out;
+ }
+ }
+
+ for (i = 0; i < 16; ++i) {
+ if (i * 4 == PCI_COMMAND)
+ continue;
+
+ if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't restore HCA reg %x, "
+ "aborting.\n", i);
+ goto out;
+ }
+ }
+
+ if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
+ hca_header[PCI_COMMAND / 4])) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't restore HCA COMMAND, "
+ "aborting.\n");
+ goto out;
+ }
+
+out:
+ kfree(hca_header);
+
+ return err;
+}
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
new file mode 100644
index 00000000000..2134f83aed8
--- /dev/null
+++ b/drivers/net/mlx4/srq.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_srq_context {
+ __be32 state_logsize_srqn;
+ u8 logstride;
+ u8 reserved1[3];
+ u8 pg_offset;
+ u8 reserved2[3];
+ u32 reserved3;
+ u8 log_page_size;
+ u8 reserved4[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved5;
+ __be16 wqe_counter;
+ u32 reserved6;
+ __be64 db_rec_addr;
+};
+
+void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_srq *srq;
+
+ spin_lock(&srq_table->lock);
+
+ srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
+ if (srq)
+ atomic_inc(&srq->refcount);
+
+ spin_unlock(&srq_table->lock);
+
+ if (!srq) {
+ mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
+ return;
+ }
+
+ srq->event(srq, event_type);
+
+ if (atomic_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+}
+
+static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int srq_num)
+{
+ return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int srq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
+ mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
+{
+ return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
+ u64 db_rec, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ u64 mtt_addr;
+ int err;
+
+ srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+ if (srq->srqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+ if (err)
+ goto err_put;
+
+ spin_lock_irq(&srq_table->lock);
+ err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
+ spin_unlock_irq(&srq_table->lock);
+ if (err)
+ goto err_cmpt_put;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_radix;
+ }
+
+ srq_context = mailbox->buf;
+ memset(srq_context, 0, sizeof *srq_context);
+
+ srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
+ srq->srqn);
+ srq_context->logstride = srq->wqe_shift - 4;
+ srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+
+ mtt_addr = mlx4_mtt_addr(dev, mtt);
+ srq_context->mtt_base_addr_h = mtt_addr >> 32;
+ srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+ srq_context->pd = cpu_to_be32(pdn);
+ srq_context->db_rec_addr = cpu_to_be64(db_rec);
+
+ err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err)
+ goto err_radix;
+
+ atomic_set(&srq->refcount, 1);
+ init_completion(&srq->free);
+
+ return 0;
+
+err_radix:
+ spin_lock_irq(&srq_table->lock);
+ radix_tree_delete(&srq_table->tree, srq->srqn);
+ spin_unlock_irq(&srq_table->lock);
+
+err_cmpt_put:
+ mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
+
+err_put:
+ mlx4_table_put(dev, &srq_table->table, srq->srqn);
+
+err_out:
+ mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
+
+void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ int err;
+
+ err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
+ if (err)
+ mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
+
+ spin_lock_irq(&srq_table->lock);
+ radix_tree_delete(&srq_table->tree, srq->srqn);
+ spin_unlock_irq(&srq_table->lock);
+
+ if (atomic_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+ wait_for_completion(&srq->free);
+
+ mlx4_table_put(dev, &srq_table->table, srq->srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_free);
+
+int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
+{
+ return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_arm);
+
+int __devinit mlx4_init_srq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+ spin_lock_init(&srq_table->lock);
+ INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+
+ return mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
+ dev->caps.num_srqs - 1, dev->caps.reserved_srqs);
+}
+
+void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
+}
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 223e0e6264b..4cf0d3fcb51 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -131,7 +131,6 @@ static const char version[] __devinitdata =
KERN_INFO DRV_NAME " dp8381x driver, version "
DRV_VERSION ", " DRV_RELDATE "\n"
KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
- KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 589785d1e76..995c0a5d406 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -63,8 +63,7 @@ static int options[MAX_UNITS];
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
-KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n"
-KERN_INFO " http://www.scyld.com/network/ne2k-pci.html\n";
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n";
#if defined(__powerpc__)
#define inl_le(addr) le32_to_cpu(inl(addr))
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 1060154ae75..4ecb8ca5a99 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -189,16 +189,20 @@ static void ibmtr_detach(struct pcmcia_device *link)
{
struct ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
+ struct tok_info *ti = netdev_priv(dev);
DEBUG(0, "ibmtr_detach(0x%p)\n", link);
+
+ /*
+ * When the card removal interrupt hits tok_interrupt(),
+ * bail out early, so we don't crash the machine.
+ */
+ ti->sram_phys |= 1;
if (link->dev_node)
unregister_netdev(dev);
-
- {
- struct tok_info *ti = netdev_priv(dev);
- del_timer_sync(&(ti->tr_timer));
- }
+
+ del_timer_sync(&(ti->tr_timer));
ibmtr_release(link);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index f994f129f3d..c0d3101eb6a 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,6 +3,7 @@
#
menu "PHY device support"
+ depends on !S390
config PHYLIB
tristate "PHY Device support and infrastructure"
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index eed433d6056..f71dab34766 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -662,10 +662,10 @@ int phy_stop_interrupts(struct phy_device *phydev)
phy_error(phydev);
/*
- * Finish any pending work; we might have been scheduled
- * to be called from keventd ourselves, though.
+ * Finish any pending work; we might have been scheduled to be called
+ * from keventd ourselves, but cancel_work_sync() handles that.
*/
- run_scheduled_work(&phydev->phy_queue);
+ cancel_work_sync(&phydev->phy_queue);
free_irq(phydev->irq, phydev);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b07da1054ad..e0489578945 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3594,7 +3594,9 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
skge->duplex = -1;
skge->speed = -1;
skge->advertising = skge_supported_modes(hw);
- skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
+
+ if (pci_wake_enabled(hw->pdev))
+ skge->wol = wol_supported(hw) & WAKE_MAGIC;
hw->dev[port] = dev;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index f51ba31970a..e1f912d0404 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -110,8 +110,7 @@ static char *media[MAX_UNITS];
/* These identify the driver base version and may not be removed. */
static char version[] =
-KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
-KERN_INFO " http://www.scyld.com/network/sundance.html\n";
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e5e901ecd80..923b9c725cc 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3716,10 +3716,8 @@ static void tg3_reset_task(struct work_struct *work)
unsigned int restart_timer;
tg3_full_lock(tp, 0);
- tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
if (!netif_running(tp->dev)) {
- tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
return;
}
@@ -3750,8 +3748,6 @@ static void tg3_reset_task(struct work_struct *work)
mod_timer(&tp->timer, jiffies + 1);
out:
- tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
-
tg3_full_unlock(tp);
}
@@ -7390,12 +7386,7 @@ static int tg3_close(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
- /* Calling flush_scheduled_work() may deadlock because
- * linkwatch_event() may be on the workqueue and it will try to get
- * the rtnl_lock which we are holding.
- */
- while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
- msleep(1);
+ cancel_work_sync(&tp->reset_task);
netif_stop_queue(dev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4d334cf5a24..bd9f4f428e5 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2228,7 +2228,7 @@ struct tg3 {
#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
#define TG3_FLAG_10_100_ONLY 0x01000000
#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
-#define TG3_FLAG_IN_RESET_TASK 0x04000000
+
#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
#define TG3_FLAG_SUPPORT_MSI 0x20000000
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 9b08afbd1f6..ea896777bca 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -269,7 +269,7 @@ done:
This would turn on IM for devices that is not contributing
to backlog congestion with unnecessary latency.
- We monitor the the device RX-ring and have:
+ We monitor the device RX-ring and have:
HW Interrupt Mitigation either ON or OFF.
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index fa440706fb4..38f3b99716b 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1021,7 +1021,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
np->tx_ring[entry].length |= DescEndRing;
/* Now acquire the irq spinlock.
- * The difficult race is the the ordering between
+ * The difficult race is the ordering between
* increasing np->cur_tx and setting DescOwned:
* - if np->cur_tx is increased first the interrupt
* handler could consider the packet as transmitted
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 985a1810ca5..2470b1ee33c 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1043,7 +1043,7 @@ static int enable_promisc(struct xircom_private *card)
/*
-link_status() checks the the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what.
+link_status() checks the link status and will return 0 for no link, 10 for a 10mbit link and 100 for.. guess what.
Must be called in locked state with interrupts disabled
*/
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index f2dd7763cd0..f7257359412 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -639,7 +639,7 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
- /* "I feel a presence... another warrior is on the the mesa."
+ /* "I feel a presence... another warrior is on the mesa."
*/
wmb();
iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
diff --git a/drivers/usb/net/Kconfig b/drivers/net/usb/Kconfig
index 3de564b2314..3de564b2314 100644
--- a/drivers/usb/net/Kconfig
+++ b/drivers/net/usb/Kconfig
diff --git a/drivers/usb/net/Makefile b/drivers/net/usb/Makefile
index 595a539f838..595a539f838 100644
--- a/drivers/usb/net/Makefile
+++ b/drivers/net/usb/Makefile
diff --git a/drivers/usb/net/asix.c b/drivers/net/usb/asix.c
index d5ef97bc4d0..d5ef97bc4d0 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/net/usb/asix.c
diff --git a/drivers/usb/net/catc.c b/drivers/net/usb/catc.c
index 86e90c59d55..86e90c59d55 100644
--- a/drivers/usb/net/catc.c
+++ b/drivers/net/usb/catc.c
diff --git a/drivers/usb/net/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5a21f06bf8a..5a21f06bf8a 100644
--- a/drivers/usb/net/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
diff --git a/drivers/usb/net/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index bc62b012602..bc62b012602 100644
--- a/drivers/usb/net/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
diff --git a/drivers/usb/net/dm9601.c b/drivers/net/usb/dm9601.c
index a6763860147..a6763860147 100644
--- a/drivers/usb/net/dm9601.c
+++ b/drivers/net/usb/dm9601.c
diff --git a/drivers/usb/net/gl620a.c b/drivers/net/usb/gl620a.c
index 031cf5ca4db..031cf5ca4db 100644
--- a/drivers/usb/net/gl620a.c
+++ b/drivers/net/usb/gl620a.c
diff --git a/drivers/usb/net/kaweth.c b/drivers/net/usb/kaweth.c
index 60d29440f31..60d29440f31 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/net/usb/kaweth.c
diff --git a/drivers/usb/net/kawethfw.h b/drivers/net/usb/kawethfw.h
index cf85fcb0d1a..cf85fcb0d1a 100644
--- a/drivers/usb/net/kawethfw.h
+++ b/drivers/net/usb/kawethfw.h
diff --git a/drivers/usb/net/mcs7830.c b/drivers/net/usb/mcs7830.c
index 6240b978fe3..6240b978fe3 100644
--- a/drivers/usb/net/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
diff --git a/drivers/usb/net/net1080.c b/drivers/net/usb/net1080.c
index 19bf8dae70c..19bf8dae70c 100644
--- a/drivers/usb/net/net1080.c
+++ b/drivers/net/usb/net1080.c
diff --git a/drivers/usb/net/pegasus.c b/drivers/net/usb/pegasus.c
index a05fd97e5bc..a05fd97e5bc 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/net/usb/pegasus.c
diff --git a/drivers/usb/net/pegasus.h b/drivers/net/usb/pegasus.h
index c7467823cd1..c7467823cd1 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/net/usb/pegasus.h
diff --git a/drivers/usb/net/plusb.c b/drivers/net/usb/plusb.c
index 45300939d18..45300939d18 100644
--- a/drivers/usb/net/plusb.c
+++ b/drivers/net/usb/plusb.c
diff --git a/drivers/usb/net/rndis_host.c b/drivers/net/usb/rndis_host.c
index 980e4aaa97a..980e4aaa97a 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
diff --git a/drivers/usb/net/rtl8150.c b/drivers/net/usb/rtl8150.c
index fa598f0340c..fa598f0340c 100644
--- a/drivers/usb/net/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
diff --git a/drivers/usb/net/usbnet.c b/drivers/net/usb/usbnet.c
index f9cd42d058b..f9cd42d058b 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/net/usb/usbnet.c
diff --git a/drivers/usb/net/usbnet.h b/drivers/net/usb/usbnet.h
index cbb53e065d6..82db5a8e528 100644
--- a/drivers/usb/net/usbnet.h
+++ b/drivers/net/usb/usbnet.h
@@ -129,7 +129,7 @@ extern void usbnet_disconnect(struct usb_interface *);
/* Drivers that reuse some of the standard USB CDC infrastructure
- * (notably, using multiple interfaces according to the the CDC
+ * (notably, using multiple interfaces according to the CDC
* union descriptor) get some helper code.
*/
struct cdc_state {
diff --git a/drivers/usb/net/zaurus.c b/drivers/net/usb/zaurus.c
index 9f98e8ce487..9f98e8ce487 100644
--- a/drivers/usb/net/zaurus.c
+++ b/drivers/net/usb/zaurus.c
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index e273347dc60..e3f5bb0fe60 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -3,6 +3,7 @@
#
menu "Wireless LAN"
+ depends on !S390
config WLAN_PRE80211
bool "Wireless LAN (pre-802.11)"
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 38fac3bbcd8..7d5b8c2cc61 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -149,7 +149,7 @@ static int airport_hard_reset(struct orinoco_private *priv)
/* Vitally important. If we don't do this it seems we get an
* interrupt somewhere during the power cycle, since
* hw_unavailable is already set it doesn't get ACKed, we get
- * into an interrupt loop and the the PMU decides to turn us
+ * into an interrupt loop and the PMU decides to turn us
* off. */
disable_irq(dev->irq);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index f8483c179e4..10e07e86542 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -658,12 +658,6 @@ struct bcm43xx_pio {
#define BCM43xx_MAX_80211_CORES 2
-#ifdef CONFIG_BCM947XX
-#define core_offset(bcm) (bcm)->current_core_offset
-#else
-#define core_offset(bcm) 0
-#endif
-
/* Generic information about a core. */
struct bcm43xx_coreinfo {
u8 available:1,
@@ -789,10 +783,6 @@ struct bcm43xx_private {
/* The currently active core. */
struct bcm43xx_coreinfo *current_core;
-#ifdef CONFIG_BCM947XX
- /** current core memory offset */
- u32 current_core_offset;
-#endif
struct bcm43xx_coreinfo *active_80211_core;
/* coreinfo structs for all possible cores follow.
* Note that a core might not exist.
@@ -943,25 +933,25 @@ struct bcm43xx_lopair * bcm43xx_get_lopair(struct bcm43xx_phyinfo *phy,
static inline
u16 bcm43xx_read16(struct bcm43xx_private *bcm, u16 offset)
{
- return ioread16(bcm->mmio_addr + core_offset(bcm) + offset);
+ return ioread16(bcm->mmio_addr + offset);
}
static inline
void bcm43xx_write16(struct bcm43xx_private *bcm, u16 offset, u16 value)
{
- iowrite16(value, bcm->mmio_addr + core_offset(bcm) + offset);
+ iowrite16(value, bcm->mmio_addr + offset);
}
static inline
u32 bcm43xx_read32(struct bcm43xx_private *bcm, u16 offset)
{
- return ioread32(bcm->mmio_addr + core_offset(bcm) + offset);
+ return ioread32(bcm->mmio_addr + offset);
}
static inline
void bcm43xx_write32(struct bcm43xx_private *bcm, u16 offset, u32 value)
{
- iowrite32(value, bcm->mmio_addr + core_offset(bcm) + offset);
+ iowrite32(value, bcm->mmio_addr + offset);
}
static inline
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index e3d2e61a31e..1f7731fcfbd 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -660,10 +660,6 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
ring->routing = BCM43xx_DMA32_CLIENTTRANS;
if (dma64)
ring->routing = BCM43xx_DMA64_CLIENTTRANS;
-#ifdef CONFIG_BCM947XX
- if (bcm->pci_dev->bus->number == 0)
- ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
-#endif
ring->bcm = bcm;
ring->nr_slots = nr_slots;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 5e96bca6730..ef6b253a92c 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -61,10 +61,6 @@ MODULE_AUTHOR("Stefano Brivio");
MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
-#ifdef CONFIG_BCM947XX
-extern char *nvram_get(char *name);
-#endif
-
#if defined(CONFIG_BCM43XX_DMA) && defined(CONFIG_BCM43XX_PIO)
static int modparam_pio;
module_param_named(pio, modparam_pio, int, 0444);
@@ -142,10 +138,6 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for using multiple fi
{ PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 43XG 802.11b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-#ifdef CONFIG_BCM947XX
- /* SB bus on BCM947xx */
- { PCI_VENDOR_ID_BROADCOM, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-#endif
{ 0 },
};
MODULE_DEVICE_TABLE(pci, bcm43xx_pci_tbl);
@@ -786,9 +778,6 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
{
u16 value;
u16 *sprom;
-#ifdef CONFIG_BCM947XX
- char *c;
-#endif
sprom = kzalloc(BCM43xx_SPROM_SIZE * sizeof(u16),
GFP_KERNEL);
@@ -796,28 +785,7 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
printk(KERN_ERR PFX "sprom_extract OOM\n");
return -ENOMEM;
}
-#ifdef CONFIG_BCM947XX
- sprom[BCM43xx_SPROM_BOARDFLAGS2] = atoi(nvram_get("boardflags2"));
- sprom[BCM43xx_SPROM_BOARDFLAGS] = atoi(nvram_get("boardflags"));
-
- if ((c = nvram_get("il0macaddr")) != NULL)
- e_aton(c, (char *) &(sprom[BCM43xx_SPROM_IL0MACADDR]));
-
- if ((c = nvram_get("et1macaddr")) != NULL)
- e_aton(c, (char *) &(sprom[BCM43xx_SPROM_ET1MACADDR]));
-
- sprom[BCM43xx_SPROM_PA0B0] = atoi(nvram_get("pa0b0"));
- sprom[BCM43xx_SPROM_PA0B1] = atoi(nvram_get("pa0b1"));
- sprom[BCM43xx_SPROM_PA0B2] = atoi(nvram_get("pa0b2"));
-
- sprom[BCM43xx_SPROM_PA1B0] = atoi(nvram_get("pa1b0"));
- sprom[BCM43xx_SPROM_PA1B1] = atoi(nvram_get("pa1b1"));
- sprom[BCM43xx_SPROM_PA1B2] = atoi(nvram_get("pa1b2"));
-
- sprom[BCM43xx_SPROM_BOARDREV] = atoi(nvram_get("boardrev"));
-#else
bcm43xx_sprom_read(bcm, sprom);
-#endif
/* boardflags2 */
value = sprom[BCM43xx_SPROM_BOARDFLAGS2];
@@ -1225,12 +1193,6 @@ static int _switch_core(struct bcm43xx_private *bcm, int core)
goto error;
udelay(10);
}
-#ifdef CONFIG_BCM947XX
- if (bcm->pci_dev->bus->number == 0)
- bcm->current_core_offset = 0x1000 * core;
- else
- bcm->current_core_offset = 0;
-#endif
return 0;
error:
@@ -1387,19 +1349,6 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
if ((bcm43xx_core_enabled(bcm)) &&
!bcm43xx_using_pio(bcm)) {
-//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here?
-#if 0
-#ifndef CONFIG_BCM947XX
- /* reset all used DMA controllers. */
- bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
- bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA2_BASE);
- bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA3_BASE);
- bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
- bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
- if (bcm->current_core->rev < 5)
- bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
-#endif
-#endif
}
if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
@@ -2140,32 +2089,11 @@ out:
return err;
}
-#ifdef CONFIG_BCM947XX
-static struct pci_device_id bcm43xx_47xx_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) },
- { 0 }
-};
-#endif
-
static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
{
int err;
bcm->irq = bcm->pci_dev->irq;
-#ifdef CONFIG_BCM947XX
- if (bcm->pci_dev->bus->number == 0) {
- struct pci_dev *d;
- struct pci_device_id *id;
- for (id = bcm43xx_47xx_ids; id->vendor; id++) {
- d = pci_get_device(id->vendor, id->device, NULL);
- if (d != NULL) {
- bcm->irq = d->irq;
- pci_dev_put(d);
- break;
- }
- }
- }
-#endif
err = request_irq(bcm->irq, bcm43xx_interrupt_handler,
IRQF_SHARED, KBUILD_MODNAME, bcm);
if (err)
@@ -2645,10 +2573,6 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
chip_id_16 = 0x4610;
else if ((pci_device >= 0x4710) && (pci_device <= 0x4715))
chip_id_16 = 0x4710;
-#ifdef CONFIG_BCM947XX
- else if ((pci_device >= 0x4320) && (pci_device <= 0x4325))
- chip_id_16 = 0x4309;
-#endif
else {
printk(KERN_ERR PFX "Could not determine Chip ID\n");
return -ENODEV;
@@ -4144,11 +4068,6 @@ static int __devinit bcm43xx_init_one(struct pci_dev *pdev,
struct bcm43xx_private *bcm;
int err;
-#ifdef CONFIG_BCM947XX
- if ((pdev->bus->number == 0) && (pdev->device != 0x0800))
- return -ENODEV;
-#endif
-
#ifdef DEBUG_SINGLE_DEVICE_ONLY
if (strcmp(pci_name(pdev), DEBUG_SINGLE_DEVICE_ONLY))
return -ENODEV;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index f76357178e4..c8f3c532bab 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -33,25 +33,6 @@
#include "bcm43xx.h"
-#ifdef CONFIG_BCM947XX
-#define atoi(str) simple_strtoul(((str != NULL) ? str : ""), NULL, 0)
-
-static inline void e_aton(char *str, char *dest)
-{
- int i = 0;
- u16 *d = (u16 *) dest;
-
- for (;;) {
- dest[i++] = (char) simple_strtoul(str, NULL, 16);
- str += 2;
- if (!*str++ || i == 6)
- break;
- }
- for (i = 0; i < 3; i++)
- d[i] = cpu_to_be16(d[i]);
-}
-#endif
-
#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes]
#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes)
/* Magic helper macro to pad structures. Ignore those above. It's magic. */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 841b3c136ad..283be4a7052 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -3054,7 +3054,7 @@ static const iw_handler prism54_handler[] = {
(iw_handler) prism54_set_wap, /* SIOCSIWAP */
(iw_handler) prism54_get_wap, /* SIOCGIWAP */
(iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCGIWAPLIST depreciated */
+ (iw_handler) NULL, /* SIOCGIWAPLIST deprecated */
(iw_handler) prism54_set_scan, /* SIOCSIWSCAN */
(iw_handler) prism54_get_scan, /* SIOCGIWSCAN */
(iw_handler) prism54_set_essid, /* SIOCSIWESSID */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index a037b11dac9..084795355b7 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -115,7 +115,7 @@ isl_upload_firmware(islpci_private *priv)
ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;
- /* set the cards base address for writting the data */
+ /* set the card's base address for writing the data */
isl38xx_w32_flush(device_base, reg,
ISL38XX_DIR_MEM_BASE_REG);
wmb(); /* be paranoid */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 67b867f837c..5740d4d4267 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -176,7 +176,7 @@ psa_write(struct net_device * dev,
volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
(psaoff(0, psa_comp_number) << 1);
- /* Authorize writting to PSA */
+ /* Authorize writing to PSA */
hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN);
while(n-- > 0)
@@ -1676,7 +1676,7 @@ wv_set_frequency(u_long base, /* i/o port of the card */
fee_write(base, 0x60,
dac, 2);
- /* We now should verify here that the EEprom writting was ok */
+ /* We now should verify here that the EEprom writing was ok */
/* ReRead the first area */
fee_read(base, 0x00,
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 4d1c4905c74..4b9de0093a7 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -120,7 +120,7 @@
* the Wavelan itself (NCR -> AT&T -> Lucent).
*
* All started with Anders Klemets <klemets@paul.rutgers.edu>,
- * writting a Wavelan ISA driver for the MACH microkernel. Girish
+ * writing a Wavelan ISA driver for the MACH microkernel. Girish
* Welling <welling@paul.rutgers.edu> had also worked on it.
* Keith Moore modify this for the Pcmcia hardware.
*
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index e04cffc8adf..8459549d0ce 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -40,6 +40,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x0df6, 0x9075), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
@@ -67,8 +68,11 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0586, 0x3410), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
+ { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
{}
};
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 3f4a7cf9efe..f2a90a7fa2d 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -109,7 +109,6 @@ static int gx_fix;
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
-KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 36c6a1bfe55..f46c69e4ed8 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -6,6 +6,7 @@
#
menu "Parallel port support"
+ depends on HAS_IOMEM
config PARPORT
tristate "Parallel port support"
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3bb7739d26a..8e58ea3d95c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -119,7 +119,7 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
* system is in its list of supported devices. Returns the matching
* pci_device_id structure or %NULL if there is no match.
*
- * Depreciated, don't use this as it will not catch any dynamic ids
+ * Deprecated, don't use this as it will not catch any dynamic ids
* that a driver might want to check for.
*/
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
diff --git a/drivers/pnp/Kconfig b/drivers/pnp/Kconfig
index c5143201419..1959cef8e9d 100644
--- a/drivers/pnp/Kconfig
+++ b/drivers/pnp/Kconfig
@@ -3,6 +3,7 @@
#
menu "Plug and Play support"
+ depends on HAS_IOMEM
config PNP
bool "Plug and Play support"
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 5e439836db2..1759baad439 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -3,6 +3,7 @@
#
menu "Real Time Clock"
+ depends on !S390
config RTC_LIB
tristate
@@ -98,7 +99,7 @@ config RTC_INTF_DEV_UIE_EMUL
bool "RTC UIE emulation on dev interface"
depends on RTC_INTF_DEV
help
- Provides an emulation for RTC_UIE if the underlaying rtc chip
+ Provides an emulation for RTC_UIE if the underlying rtc chip
driver does not expose RTC_UIE ioctls. Those requests generate
once-per-second update interrupts, used for synchronization.
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 6abf4811958..e0f91dfce0f 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -104,7 +104,7 @@ static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
writeb(tmp, rtc->regbase + RCR1);
- rtc_update_irq(&rtc->rtc_dev, 1, events);
+ rtc_update_irq(rtc->rtc_dev, 1, events);
spin_unlock(&rtc->lock);
@@ -139,7 +139,7 @@ static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
rtc->rearm_aie = 1;
- rtc_update_irq(&rtc->rtc_dev, 1, events);
+ rtc_update_irq(rtc->rtc_dev, 1, events);
}
spin_unlock(&rtc->lock);
@@ -153,7 +153,7 @@ static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
spin_lock(&rtc->lock);
- rtc_update_irq(&rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
+ rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
spin_unlock(&rtc->lock);
@@ -341,7 +341,7 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_sec--;
#endif
- dev_dbg(&dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__FUNCTION__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index b250c535450..e879b212cf4 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,11 +1,9 @@
-if S390 && BLOCK
-
comment "S/390 block device drivers"
- depends on S390
+ depends on S390 && BLOCK
config BLK_DEV_XPRAM
tristate "XPRAM disk support"
- depends on S390
+ depends on S390 && BLOCK
help
Select this option if you want to use your expanded storage on S/390
or zSeries as a disk. This is useful as a _fast_ swap device if you
@@ -15,12 +13,13 @@ config BLK_DEV_XPRAM
config DCSSBLK
tristate "DCSSBLK support"
+ depends on S390 && BLOCK
help
Support for dcss block device
config DASD
tristate "Support for DASD devices"
- depends on CCW
+ depends on CCW && BLOCK
help
Enable this option if you want to access DASDs directly utilizing
S/390s channel subsystem commands. This is necessary for running
@@ -62,5 +61,3 @@ config DASD_EER
This driver provides a character device interface to the
DASD extended error reporting. This is only needed if you want to
use applications written for the EER facility.
-
-endif
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 977521013fe..bfeca57098f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2174,9 +2174,10 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
return ret;
}
-struct dasd_ccw_req * dasd_generic_build_rdc(struct dasd_device *device,
- void *rdc_buffer,
- int rdc_buffer_size, char *magic)
+static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
+ void *rdc_buffer,
+ int rdc_buffer_size,
+ char *magic)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -2219,6 +2220,7 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
dasd_sfree_request(cqr, cqr->device);
return ret;
}
+EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
static int __init
dasd_init(void)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index e810e4a44ed..eccac1c3b71 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -50,6 +50,7 @@ struct dasd_diag_private {
struct dasd_diag_rw_io iob;
struct dasd_diag_init_io iib;
blocknum_t pt_block;
+ struct ccw_dev_id dev_id;
};
struct dasd_diag_req {
@@ -102,7 +103,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
iib = &private->iib;
memset(iib, 0, sizeof (struct dasd_diag_init_io));
- iib->dev_nr = _ccw_device_get_device_number(device->cdev);
+ iib->dev_nr = private->dev_id.devno;
iib->block_size = blocksize;
iib->offset = offset;
iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
@@ -127,7 +128,7 @@ mdsk_term_io(struct dasd_device * device)
private = (struct dasd_diag_private *) device->private;
iib = &private->iib;
memset(iib, 0, sizeof (struct dasd_diag_init_io));
- iib->dev_nr = _ccw_device_get_device_number(device->cdev);
+ iib->dev_nr = private->dev_id.devno;
rc = dia250(iib, TERM_BIO);
return rc;
}
@@ -166,7 +167,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
private = (struct dasd_diag_private *) device->private;
dreq = (struct dasd_diag_req *) cqr->data;
- private->iob.dev_nr = _ccw_device_get_device_number(device->cdev);
+ private->iob.dev_nr = private->dev_id.devno;
private->iob.key = 0;
private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
private->iob.block_count = dreq->block_count;
@@ -323,11 +324,12 @@ dasd_diag_check_device(struct dasd_device *device)
"memory allocation failed for private data");
return -ENOMEM;
}
+ ccw_device_get_id(device->cdev, &private->dev_id);
device->private = (void *) private;
}
/* Read Device Characteristics */
rdc_data = (void *) &(private->rdc_data);
- rdc_data->dev_nr = _ccw_device_get_device_number(device->cdev);
+ rdc_data->dev_nr = private->dev_id.devno;
rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
rc = diag210((struct diag210 *) rdc_data);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c9583fbc2a7..418b4e63a4f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -450,9 +450,9 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
return 0;
}
-struct dasd_ccw_req * dasd_eckd_build_rcd_lpm(struct dasd_device *device,
- void *rcd_buffer,
- struct ciw *ciw, __u8 lpm)
+static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
+ void *rcd_buffer,
+ struct ciw *ciw, __u8 lpm)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 758cfb54286..672eb0a3dd0 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -255,6 +255,7 @@ dasd_ioctl_information(struct dasd_device *device,
unsigned long flags;
int rc;
struct ccw_device *cdev;
+ struct ccw_dev_id dev_id;
if (!device->discipline->fill_info)
return -EINVAL;
@@ -270,8 +271,9 @@ dasd_ioctl_information(struct dasd_device *device,
}
cdev = device->cdev;
+ ccw_device_get_id(cdev, &dev_id);
- dasd_info->devno = _ccw_device_get_device_number(device->cdev);
+ dasd_info->devno = dev_id.devno;
dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev);
dasd_info->cu_type = cdev->id.cu_type;
dasd_info->cu_model = cdev->id.cu_model;
diff --git a/drivers/s390/Kconfig b/drivers/s390/char/Kconfig
index 165af398fde..66102a18432 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -1,69 +1,9 @@
-config CCW
- bool
- default y
-
-source "drivers/block/Kconfig"
-
-source "drivers/md/Kconfig"
-
-
-menu "Character device drivers"
-
-config UNIX98_PTYS
- bool "Unix98 PTY support"
- ---help---
- A pseudo terminal (PTY) is a software device consisting of two
- halves: a master and a slave. The slave device behaves identical to
- a physical terminal; the master device is used by a process to
- read data from and write data to the slave, thereby emulating a
- terminal. Typical programs for the master side are telnet servers
- and xterms.
-
- Linux has traditionally used the BSD-like names /dev/ptyxx for
- masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
- has a number of problems. The GNU C library glibc 2.1 and later,
- however, supports the Unix98 naming standard: in order to acquire a
- pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
- terminal is then made available to the process and the pseudo
- terminal slave can be accessed as /dev/pts/<number>. What was
- traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
-
- The entries in /dev/pts/ are created on the fly by a virtual
- file system; therefore, if you say Y here you should say Y to
- "/dev/pts file system for Unix98 PTYs" as well.
-
- If you want to say Y here, you need to have the C library glibc 2.1
- or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
- Read the instructions in <file:Documentation/Changes> pertaining to
- pseudo terminals. It's safe to say N.
-
-config UNIX98_PTY_COUNT
- int "Maximum number of Unix98 PTYs in use (0-2048)"
- depends on UNIX98_PTYS
- default "256"
- help
- The maximum number of Unix98 PTYs that can be used at any one time.
- The default is 256, and should be enough for desktop systems. Server
- machines which support incoming telnet/rlogin/ssh connections and/or
- serve several X terminals may want to increase this: every incoming
- connection and every xterm uses up one PTY.
-
- When not in use, each additional set of 256 PTYs occupy
- approximately 8 KB of kernel memory on 32-bit architectures.
-
-config HANGCHECK_TIMER
- tristate "Hangcheck timer"
- help
- The hangcheck-timer module detects when the system has gone
- out to lunch past a certain margin. It can reboot the system
- or merely print a warning.
-
-source "drivers/char/watchdog/Kconfig"
-
comment "S/390 character device drivers"
+ depends on S390
config TN3270
tristate "Support for locally attached 3270 terminals"
+ depends on CCW
help
Include support for IBM 3270 terminals.
@@ -88,6 +28,7 @@ config TN3270_CONSOLE
config TN3215
bool "Support for 3215 line mode terminal"
+ depends on CCW
help
Include support for IBM 3215 line-mode terminals.
@@ -99,12 +40,19 @@ config TN3215_CONSOLE
Linux system console.
config CCW_CONSOLE
- bool
- depends on TN3215_CONSOLE || TN3270_CONSOLE
- default y
-
+ bool
+ depends on TN3215_CONSOLE || TN3270_CONSOLE
+ default y
+
+config SCLP
+ bool "Support for SCLP"
+ depends on S390
+ help
+ Include support for the SCLP interface to the service element.
+
config SCLP_TTY
bool "Support for SCLP line mode terminal"
+ depends on SCLP
help
Include support for IBM SCLP line-mode terminals.
@@ -117,6 +65,7 @@ config SCLP_CONSOLE
config SCLP_VT220_TTY
bool "Support for SCLP VT220-compatible terminal"
+ depends on SCLP
help
Include support for an IBM SCLP VT220-compatible terminal.
@@ -129,6 +78,7 @@ config SCLP_VT220_CONSOLE
config SCLP_CPI
tristate "Control-Program Identification"
+ depends on SCLP
help
This option enables the hardware console interface for system
identification. This is commonly used for workload management and
@@ -140,6 +90,7 @@ config SCLP_CPI
config S390_TAPE
tristate "S/390 tape device support"
+ depends on CCW
help
Select this option if you want to access channel-attached tape
devices on IBM S/390 or zSeries.
@@ -194,6 +145,7 @@ config VMLOGRDR
config VMCP
tristate "Support for the z/VM CP interface (VM only)"
+ depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM
@@ -207,33 +159,8 @@ config MONREADER
config MONWRITER
tristate "API for writing z/VM monitor service records"
+ depends on S390
default "m"
help
Character device driver for writing z/VM monitor service records
-endmenu
-
-menu "Cryptographic devices"
-
-config ZCRYPT
- tristate "Support for PCI-attached cryptographic adapters"
- select ZCRYPT_MONOLITHIC if ZCRYPT="y"
- default "m"
- help
- Select this option if you want to use a PCI-attached cryptographic
- adapter like:
- + PCI Cryptographic Accelerator (PCICA)
- + PCI Cryptographic Coprocessor (PCICC)
- + PCI-X Cryptographic Coprocessor (PCIXCC)
- + Crypto Express2 Coprocessor (CEX2C)
- + Crypto Express2 Accelerator (CEX2A)
-
-config ZCRYPT_MONOLITHIC
- bool "Monolithic zcrypt module"
- depends on ZCRYPT="m"
- help
- Select this option if you want to have a single module z90crypt.ko
- that contains all parts of the crypto device driver (ap bus,
- request router and all the card drivers).
-
-endmenu
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 8df7b1323c0..67009bfa093 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -97,7 +97,7 @@ static u8 user_data_sever[16] = {
* Create the 8 bytes EBCDIC DCSS segment name from
* an ASCII name, incl. padding
*/
-static inline void dcss_mkname(char *ascii_name, char *ebcdic_name)
+static void dcss_mkname(char *ascii_name, char *ebcdic_name)
{
int i;
@@ -191,7 +191,7 @@ static inline u32 mon_rec_end(struct mon_msg *monmsg)
return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
}
-static inline int mon_check_mca(struct mon_msg *monmsg)
+static int mon_check_mca(struct mon_msg *monmsg)
{
if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
(mon_rec_start(monmsg) < mon_dcss_start) ||
@@ -209,8 +209,8 @@ static inline int mon_check_mca(struct mon_msg *monmsg)
return 0;
}
-static inline int mon_send_reply(struct mon_msg *monmsg,
- struct mon_private *monpriv)
+static int mon_send_reply(struct mon_msg *monmsg,
+ struct mon_private *monpriv)
{
int rc;
@@ -236,7 +236,7 @@ static inline int mon_send_reply(struct mon_msg *monmsg,
return 0;
}
-static inline void mon_free_mem(struct mon_private *monpriv)
+static void mon_free_mem(struct mon_private *monpriv)
{
int i;
@@ -246,7 +246,7 @@ static inline void mon_free_mem(struct mon_private *monpriv)
kfree(monpriv);
}
-static inline struct mon_private *mon_alloc_mem(void)
+static struct mon_private *mon_alloc_mem(void)
{
int i;
struct mon_private *monpriv;
@@ -307,7 +307,7 @@ static inline void mon_next_mca(struct mon_msg *monmsg)
monmsg->pos = 0;
}
-static inline struct mon_msg *mon_next_message(struct mon_private *monpriv)
+static struct mon_msg *mon_next_message(struct mon_private *monpriv)
{
struct mon_msg *monmsg;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 8facd14adb7..f6ef90ee3e7 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -589,9 +589,10 @@ static int
__raw3270_size_device_vm(struct raw3270 *rp)
{
int rc, model;
+ struct ccw_dev_id dev_id;
- raw3270_init_diag210.vrdcdvno =
- _ccw_device_get_device_number(rp->cdev);
+ ccw_device_get_id(rp->cdev, &dev_id);
+ raw3270_init_diag210.vrdcdvno = dev_id.devno;
raw3270_init_diag210.vrdclen = sizeof(struct diag210);
rc = diag210(&raw3270_init_diag210);
if (rc)
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 87ac4a3ad49..dbb99d1b6f5 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -132,6 +132,9 @@ int sclp_deactivate(void);
int sclp_reactivate(void);
int sclp_service_call(sclp_cmdw_t command, void *sccb);
+int sclp_sdias_init(void);
+void sclp_sdias_exit(void);
+
/* useful inlines */
/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index bbd5b8b66f4..d6b06ab8118 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -23,7 +23,7 @@
/*
* The room for the SCCB (only for writing) is not equal to a pages size
- * (as it is specified as the maximum size in the the SCLP documentation)
+ * (as it is specified as the maximum size in the SCLP documentation)
* because of the additional data structure described above.
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 52283daddae..1c064976b32 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -66,9 +66,9 @@ static DEFINE_MUTEX(sdias_mutex);
static void sdias_callback(struct sclp_req *request, void *data)
{
- struct sdias_sccb *sccb;
+ struct sdias_sccb *cbsccb;
- sccb = (struct sdias_sccb *) request->sccb;
+ cbsccb = (struct sdias_sccb *) request->sccb;
sclp_req_done = 1;
wake_up(&sdias_wq); /* Inform caller, that request is complete */
TRACE("callback done\n");
@@ -229,7 +229,7 @@ out:
return rc;
}
-int __init sdias_init(void)
+int __init sclp_sdias_init(void)
{
int rc;
@@ -248,7 +248,7 @@ int __init sdias_init(void)
return 0;
}
-void __exit sdias_exit(void)
+void __exit sclp_sdias_exit(void)
{
debug_unregister(sdias_dbf);
sclp_unregister(&sclp_sdias_register);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 89d439316a5..66eb0688d52 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -21,6 +21,7 @@
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
+#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
#define MSG(x...) printk( KERN_ALERT x )
@@ -564,8 +565,6 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr)
get_cpu_id(&hdr->cpu_id);
}
-extern int sdias_init(void);
-
static int __init zcore_init(void)
{
unsigned char arch;
@@ -582,7 +581,7 @@ static int __init zcore_init(void)
TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
- rc = sdias_init();
+ rc = sclp_sdias_init();
if (rc)
goto fail;
@@ -634,12 +633,10 @@ fail:
return rc;
}
-extern void sdias_exit(void);
-
static void __exit zcore_exit(void)
{
debug_unregister(zcore_dbf);
- sdias_exit();
+ sclp_sdias_exit();
diag308(DIAG308_REL_HSA, NULL);
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 27c6d9e55b2..dfca0ef139f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -191,8 +191,7 @@ static int css_register_subchannel(struct subchannel *sch)
return ret;
}
-int
-css_probe_device(struct subchannel_id schid)
+static int css_probe_device(struct subchannel_id schid)
{
int ret;
struct subchannel *sch;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 71fcfdc4280..ed7977531c3 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -138,9 +138,7 @@ struct css_driver {
* all css_drivers have the css_bus_type
*/
extern struct bus_type css_bus_type;
-extern struct css_driver io_subchannel_driver;
-extern int css_probe_device(struct subchannel_id);
extern int css_sch_device_register(struct subchannel *);
extern void css_sch_device_unregister(struct subchannel *);
extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a23ff582db9..a8b373f69cf 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -129,7 +129,7 @@ static void io_subchannel_verify(struct device *);
static void io_subchannel_ioterm(struct device *);
static void io_subchannel_shutdown(struct subchannel *);
-struct css_driver io_subchannel_driver = {
+static struct css_driver io_subchannel_driver = {
.subchannel_type = SUBCHANNEL_TYPE_IO,
.drv = {
.name = "io_subchannel",
@@ -546,7 +546,7 @@ static struct attribute_group ccwdev_attr_group = {
.attrs = ccwdev_attrs,
};
-struct attribute_group *ccwdev_attr_groups[] = {
+static struct attribute_group *ccwdev_attr_groups[] = {
&ccwdev_attr_group,
NULL,
};
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 16f59fcb66b..a5d263fb55a 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -616,6 +616,17 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
return chp_get_chp_desc(chpid);
}
+/**
+ * ccw_device_get_id - obtain a ccw device id
+ * @cdev: device to obtain the id for
+ * @dev_id: where to fill in the values
+ */
+void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
+{
+ *dev_id = cdev->private->dev_id;
+}
+EXPORT_SYMBOL(ccw_device_get_id);
+
// FIXME: these have to go:
int
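The hunks in dasd_diag.c, dasd_ioctl.c and raw3270.c above all use this new helper the same way: snapshot the ccw_dev_id once and read devno from the copy, instead of reaching for the removed _ccw_device_get_device_number(). A minimal sketch of the calling pattern (my_probe() and the printk are illustrative, not part of the patch):

	static int my_probe(struct ccw_device *cdev)
	{
		struct ccw_dev_id dev_id;

		ccw_device_get_id(cdev, &dev_id);	/* one copy, no cdev->private access */
		printk(KERN_INFO "found device 0.%x.%04x\n",
		       dev_id.ssid, dev_id.devno);
		return 0;
	}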
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index f770018fe1d..e70aeb7a378 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -1983,6 +1983,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
qdio_mark_q(q);
else {
+ qdio_perf_stat_dec(&perf_stats.tl_runs);
__qdio_inbound_processing(q);
}
}
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index f98fa465df0..eada69dec4f 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -3,7 +3,7 @@ menu "S/390 network device drivers"
config LCS
tristate "Lan Channel Station Interface"
- depends on NETDEVICES && (NET_ETHERNET || TR || FDDI)
+ depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
help
Select this option if you want to use LCS networking on IBM S/390
or zSeries. This device driver supports Token Ring (IEEE 802.5),
@@ -13,7 +13,7 @@ config LCS
config CTC
tristate "CTC device support"
- depends on NETDEVICES
+ depends on CCW && NETDEVICES
help
Select this option if you want to use channel-to-channel networking
on IBM S/390 or zSeries. This device driver supports real CTC
@@ -42,7 +42,7 @@ config SMSGIUCV
config CLAW
tristate "CLAW device support"
- depends on NETDEVICES
+ depends on CCW && NETDEVICES
help
This driver supports channel attached CLAW devices.
CLAW is Common Link Access for Workstation. Common devices
@@ -52,7 +52,7 @@ config CLAW
config QETH
tristate "Gigabit Ethernet device support"
- depends on NETDEVICES && IP_MULTICAST && QDIO
+ depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
help
This driver supports the IBM S/390 and zSeries OSA Express adapters
in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 29d176036e5..0b96d49dd63 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -2860,7 +2860,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
if (!atomic_read(&queue->set_pci_flags_count)){
/*
* there's no outstanding PCI any more, so we
- * have to request a PCI to be sure the the PCI
+ * have to request a PCI to be sure that the PCI
* will wake at some time in the future then we
* can flush packed buffers that might still be
* hanging around, which can happen if no
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
index f54fdfdbf06..f29a4bc4f6f 100644
--- a/drivers/s390/net/qeth_mpc.c
+++ b/drivers/s390/net/qeth_mpc.c
@@ -162,7 +162,7 @@ struct ipa_rc_msg {
char *msg;
};
-struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_SUCCESS, "success"},
{IPA_RC_NOTSUPP, "Command not supported"},
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
@@ -226,7 +226,7 @@ struct ipa_cmd_names {
char *name;
};
-struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_STARTLAN, "startlan"},
{IPA_CMD_STOPLAN, "stoplan"},
{IPA_CMD_SETVMAC, "setvmac"},
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 324899c96ef..ddff40c4212 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -607,8 +607,7 @@ zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
* @sg_count: elements in array
* Return: size of entire scatter-gather list
*/
-size_t
-zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
+static size_t zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
{
unsigned int i;
struct scatterlist *p;
@@ -975,8 +974,7 @@ zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.data_gid_pn);
}
-void
-zfcp_dummy_release(struct device *dev)
+static void zfcp_dummy_release(struct device *dev)
{
return;
}
@@ -1336,7 +1334,7 @@ zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC
-void
+static void
zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
struct fsf_status_read_buffer *status_buffer)
{
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d8191d115c1..5f3212440f6 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -478,7 +478,7 @@ static struct debug_view zfcp_hba_dbf_view = {
NULL
};
-void
+static void
_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
u32 s_id, u32 d_id, void *buffer, int buflen)
{
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index cb08ca3cc0f..bdf5782b8a7 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -222,7 +222,7 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
* Since we have been using this adapter, it is save to assume
* that it is not failed but recoverable. The card seems to
* report link-up events by self-initiated queue shutdown.
- * That is why we need to clear the the link-down flag
+ * That is why we need to clear the link-down flag
* which is set again in case we have missed by a mile.
*/
zfcp_erp_adapter_reopen(
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 74b999d77bb..4fab0c23814 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -156,7 +156,7 @@ static unsigned short get_pins(unsigned minor)
#define BPP_ICR 0x18
#define BPP_SIZE 0x1A
-/* BPP_CSR. Bits of type RW1 are cleared with writting '1'. */
+/* BPP_CSR. Bits of type RW1 are cleared with writing '1'. */
#define P_DEV_ID_MASK 0xf0000000 /* R */
#define P_DEV_ID_ZEBRA 0x40000000
#define P_DEV_ID_L64854 0xa0000000 /* == NCR 89C100+89C105. Pity. */
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 8d72bbae96a..0bada0028aa 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -966,7 +966,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
| AHD_BUSFREEREV_BUG;
ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG;
- /* If the user requested the the SLOWCRC bit to be set. */
+ /* If the user requested that the SLOWCRC bit be set. */
if (aic79xx_slowcrc)
ahd->features |= AHD_AIC79XXB_SLOWCRC;
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
index e6b70123940..e78ce0fa44d 100644
--- a/drivers/scsi/aic94xx/Makefile
+++ b/drivers/scsi/aic94xx/Makefile
@@ -6,7 +6,7 @@
#
# This file is licensed under GPLv2.
#
-# This file is part of the the aic94xx driver.
+# This file is part of the aic94xx driver.
#
# The aic94xx driver is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index a965ed3548d..564ea90ed3a 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -541,7 +541,7 @@ static struct ParameterData __devinitdata cfg_data[] = {
/*
- * Safe settings. If set to zero the the BIOS/default values with
+ * Safe settings. If set to zero the BIOS/default values with
* command line overrides will be used. If set to 1 then safe and
* slow settings will be used.
*/
@@ -617,7 +617,7 @@ static void __devinit fix_settings(void)
/*
* Mapping from the eeprom delay index value (index into this array)
- * to the the number of actual seconds that the delay should be for.
+ * to the number of actual seconds that the delay should be for.
*/
static char __devinitdata eeprom_index_to_delay_map[] =
{ 1, 3, 5, 10, 16, 30, 60, 120 };
@@ -4136,7 +4136,7 @@ static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long
* @io_port: base I/O address
* @addr: offset into SEEPROM
*
- * Returns the the byte read.
+ * Returns the byte read.
**/
static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
{
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 2b5b8a93bc1..8263f752809 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -721,19 +721,23 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
return ide_stopped;
}
+#ifdef CONFIG_IDE_PROC_FS
static void idescsi_add_settings(ide_drive_t *drive)
{
idescsi_scsi_t *scsi = drive_to_idescsi(drive);
/*
- * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function
+ * drive setting name read/write data type min max mul_factor div_factor data pointer set function
*/
- ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL);
- ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
- ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
- ide_add_setting(drive, "transform", SETTING_RW, -1, -1, TYPE_INT, 0, 3, 1, 1, &scsi->transform, NULL);
- ide_add_setting(drive, "log", SETTING_RW, -1, -1, TYPE_INT, 0, 1, 1, 1, &scsi->log, NULL);
+ ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL);
+ ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
+ ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
+ ide_add_setting(drive, "transform", SETTING_RW, TYPE_INT, 0, 3, 1, 1, &scsi->transform, NULL);
+ ide_add_setting(drive, "log", SETTING_RW, TYPE_INT, 0, 1, 1, 1, &scsi->log, NULL);
}
+#else
+static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
+#endif
/*
* Driver initialization.
@@ -756,7 +760,7 @@ static void ide_scsi_remove(ide_drive_t *drive)
struct ide_scsi_obj *scsi = scsihost_to_idescsi(scsihost);
struct gendisk *g = scsi->disk;
- ide_unregister_subdriver(drive, scsi->driver);
+ ide_proc_unregister_driver(drive, scsi->driver);
ide_unregister_region(g);
@@ -770,13 +774,11 @@ static void ide_scsi_remove(ide_drive_t *drive)
static int ide_scsi_probe(ide_drive_t *);
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IDE_PROC_FS
static ide_proc_entry_t idescsi_proc[] = {
{ "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
{ NULL, 0, NULL, NULL }
};
-#else
-# define idescsi_proc NULL
#endif
static ide_driver_t idescsi_driver = {
@@ -790,11 +792,13 @@ static ide_driver_t idescsi_driver = {
.version = IDESCSI_VERSION,
.media = ide_scsi,
.supports_dsc_overlap = 0,
- .proc = idescsi_proc,
.do_request = idescsi_do_request,
.end_request = idescsi_end_request,
.error = idescsi_atapi_error,
.abort = idescsi_atapi_abort,
+#ifdef CONFIG_IDE_PROC_FS
+ .proc = idescsi_proc,
+#endif
};
static int idescsi_ide_open(struct inode *inode, struct file *filp)
@@ -1153,7 +1157,7 @@ static int ide_scsi_probe(ide_drive_t *drive)
idescsi->host = host;
idescsi->disk = g;
g->private_data = &idescsi->driver;
- ide_register_subdriver(drive, &idescsi_driver);
+ ide_proc_register_driver(drive, &idescsi_driver);
err = 0;
idescsi_setup(drive, idescsi);
g->fops = &idescsi_ops;
@@ -1165,7 +1169,7 @@ static int ide_scsi_probe(ide_drive_t *drive)
}
/* fall through on error */
ide_unregister_region(g);
- ide_unregister_subdriver(drive, &idescsi_driver);
+ ide_proc_unregister_driver(drive, &idescsi_driver);
put_disk(g);
out_host_put:
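The CONFIG_IDE_PROC_FS conversion in ide-scsi.c above relies on the usual compile-out idiom: the real idescsi_add_settings() is built only when the option is enabled, and an empty static inline stands in otherwise, so the probe path needs no #ifdef of its own. A generic sketch of the idiom (CONFIG_FOO, foo_add_proc() and struct foo_dev are illustrative names, not from this patch):

	#ifdef CONFIG_FOO
	void foo_add_proc(struct foo_dev *dev);	/* real definition built in */
	#else
	static inline void foo_add_proc(struct foo_dev *dev) { }
	#endif

Callers simply call foo_add_proc() unconditionally; the stub compiles away when CONFIG_FOO is off.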
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 61fbcdcbb00..1f5a07bf2a7 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -173,7 +173,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* @retries: number of times to retry request
* @flags: or into request flags;
*
- * returns the req->errors value which is the the scsi_cmnd result
+ * returns the req->errors value which is the scsi_cmnd result
* field.
**/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index e8efe938c4e..a6f5bfbb777 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -5,6 +5,7 @@
#
menu "Serial drivers"
+ depends on HAS_IOMEM
#
# The new 8250/16550 serial drivers
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 07c587ec71b..7c9d37f651e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -6,6 +6,7 @@
# fully appropriate there, so it'd need some thought to do well.
#
menu "SPI support"
+ depends on HAS_IOMEM
config SPI
bool "SPI support"
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 66e7bc98579..1d8a2f6bb8e 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -22,10 +22,7 @@
#include <asm/io.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>
-
-#ifdef CONFIG_ARCH_AT91
#include <asm/arch/cpu.h>
-#endif
#include "atmel_spi.h"
@@ -552,10 +549,8 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
goto out_free_buffer;
as->irq = irq;
as->clk = clk;
-#ifdef CONFIG_ARCH_AT91
if (!cpu_is_at91rm9200())
as->new_1 = 1;
-#endif
ret = request_irq(irq, atmel_spi_interrupt, 0,
pdev->dev.bus_id, master);
diff --git a/drivers/telephony/Kconfig b/drivers/telephony/Kconfig
index 7625b1816ba..dd1d6a53f3c 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/telephony/Kconfig
@@ -3,6 +3,7 @@
#
menu "Telephony Support"
+ depends on HAS_IOMEM
config PHONE
tristate "Linux telephony support"
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index b847bbc8b0e..15499b7e33f 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -3,6 +3,7 @@
#
menu "USB support"
+ depends on HAS_IOMEM
# Host-side USB depends on having a host controller
# NOTE: dummy_hcd is always an option, but it's ignored here ...
@@ -87,8 +88,6 @@ source "drivers/usb/storage/Kconfig"
source "drivers/usb/image/Kconfig"
-source "drivers/usb/net/Kconfig"
-
source "drivers/usb/mon/Kconfig"
comment "USB port drivers"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 0ef090b1b37..72464b58699 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -23,13 +23,6 @@ obj-$(CONFIG_USB_PRINTER) += class/
obj-$(CONFIG_USB_STORAGE) += storage/
obj-$(CONFIG_USB) += storage/
-obj-$(CONFIG_USB_CATC) += net/
-obj-$(CONFIG_USB_KAWETH) += net/
-obj-$(CONFIG_USB_PEGASUS) += net/
-obj-$(CONFIG_USB_RTL8150) += net/
-obj-$(CONFIG_USB_USBNET) += net/
-obj-$(CONFIG_USB_ZD1201) += net/
-
obj-$(CONFIG_USB_MDC800) += image/
obj-$(CONFIG_USB_MICROTEK) += image/
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index b082d95bbba..11e9b15ca45 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1033,7 +1033,7 @@ static int usbatm_do_heavy_init(void *arg)
static int usbatm_heavy_init(struct usbatm_data *instance)
{
- int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_KERNEL);
+ int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_FS | CLONE_FILES);
if (ret < 0) {
usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret);
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index b5332e679c4..88fb56d5db8 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -1307,7 +1307,7 @@ static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
}
-/* remove a service from the the device
+/* remove a service from the device
scp->id must be set! */
static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
{
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ba5d1dc0303..3efe67092f1 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -558,7 +558,7 @@ config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
depends on USB_SERIAL
help
- Say Y here if you have a USB debugging device used to recieve
+ Say Y here if you have a USB debugging device used to receive
debugging data from another machine. The most common of these
devices is the NetChip TurboCONNECT device.
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b675735bfbe..fbc8c27d5d9 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -9,7 +9,7 @@
* The device works as an standard CDC device, it has 2 interfaces, the first
* one is for firmware access and the second is the serial one.
* The protocol is very simply, there are two posibilities reading or writing.
- * When writting the first urb must have a Header that starts with 0x20 0x29 the
+ * When writing the first urb must have a Header that starts with 0x20 0x29 the
* next two bytes must say how much data will be sended.
* When reading the process is almost equal except that the header starts with
* 0x00 0x20.
@@ -18,7 +18,7 @@
* buffer: The First and Second byte is used for a Header, the Third and Fourth
* tells the device the amount of information the package holds.
* Packages are 60 bytes long Header Stuff.
- * When writting to the device the first two bytes of the header are 0x20 0x29
+ * When writing to the device the first two bytes of the header are 0x20 0x29
* When reading the bytes are 0x00 0x20, or 0x00 0x10, there is an strange
* situation, when too much data arrives to the device because it sends the data
* but with out the header. I will use a simply hack to override this situation,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 18f74ac7656..4807f960150 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2465,7 +2465,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
((edge_serial->is_epic) &&
(!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) &&
(regNum == MCR))) {
- dbg("SendCmdWriteUartReg - Not writting to MCR Register");
+ dbg("SendCmdWriteUartReg - Not writing to MCR Register");
return 0;
}
@@ -2473,7 +2473,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
((edge_serial->is_epic) &&
(!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) &&
(regNum == LCR))) {
- dbg ("SendCmdWriteUartReg - Not writting to LCR Register");
+ dbg ("SendCmdWriteUartReg - Not writing to LCR Register");
return 0;
}
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1132ba5ff39..f54438828cb 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -3,6 +3,7 @@
#
menu "Graphics support"
+ depends on HAS_IOMEM
source "drivers/video/backlight/Kconfig"
source "drivers/video/display/Kconfig"
@@ -1348,6 +1349,20 @@ config FB_VOODOO1
Please read the <file:Documentation/fb/README-sstfb.txt> for supported
options and other important info support.
+config FB_VT8623
+ tristate "VIA VT8623 support"
+ depends on FB && PCI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select FB_TILEBLITTING
+ select FB_SVGALIB
+ select VGASTATE
+ select FONT_8x16 if FRAMEBUFFER_CONSOLE
+ ---help---
+ Driver for CastleRock integrated graphics core in the
+ VIA VT8623 [Apollo CLE266] chipset.
+
config FB_CYBLA
tristate "Cyberblade/i1 support"
depends on FB && PCI && X86_32 && !64BIT
@@ -1401,6 +1416,20 @@ config FB_TRIDENT_ACCEL
This will compile the Trident frame buffer device with
acceleration functions.
+config FB_ARK
+ tristate "ARK 2000PV support"
+ depends on FB && PCI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select FB_TILEBLITTING
+ select FB_SVGALIB
+ select VGASTATE
+ select FONT_8x16 if FRAMEBUFFER_CONSOLE
+ ---help---
+ Driver for PCI graphics boards with ARK 2000PV chip
+ and ICS 5342 RAMDAC.
+
config FB_PM3
tristate "Permedia3 support"
depends on FB && PCI && BROKEN
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index a916c204274..0b70567458f 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -54,10 +54,12 @@ obj-$(CONFIG_FB_VALKYRIE) += valkyriefb.o
obj-$(CONFIG_FB_CT65550) += chipsfb.o
obj-$(CONFIG_FB_IMSTT) += imsttfb.o
obj-$(CONFIG_FB_FM2) += fm2fb.o
+obj-$(CONFIG_FB_VT8623) += vt8623fb.o
obj-$(CONFIG_FB_CYBLA) += cyblafb.o
obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
obj-$(CONFIG_FB_LE80578) += vermilion/
obj-$(CONFIG_FB_S3) += s3fb.o
+obj-$(CONFIG_FB_ARK) += arkfb.o
obj-$(CONFIG_FB_STI) += stifb.o
obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o
obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
new file mode 100644
index 00000000000..ba6fede5c46
--- /dev/null
+++ b/drivers/video/arkfb.c
@@ -0,0 +1,1200 @@
+/*
+ * linux/drivers/video/arkfb.c -- Frame buffer device driver for ARK 2000PV
+ * with ICS 5342 dac (it is easy to add support for different dacs).
+ *
+ * Copyright (c) 2007 Ondrej Zajicek <santiago@crfreenet.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Code is based on s3fb
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/svga.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/console.h> /* Why should fb driver call console functions? Because of acquire_console_sem() */
+#include <video/vga.h>
+
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+struct arkfb_info {
+ int mclk_freq;
+ int mtrr_reg;
+
+ struct dac_info *dac;
+ struct vgastate state;
+ struct mutex open_lock;
+ unsigned int ref_count;
+ u32 pseudo_palette[16];
+};
+
+
+/* ------------------------------------------------------------------------- */
+
+
+static const struct svga_fb_format arkfb_formats[] = {
+ { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
+ FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 8},
+ { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
+ { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
+ FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16},
+ { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
+ {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
+ {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
+ {24, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 8, 8},
+ {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
+ SVGA_FORMAT_END
+};
+
+
+/* CRT timing register sets */
+
+static const struct vga_regset ark_h_total_regs[] = {{0x00, 0, 7}, {0x41, 7, 7}, VGA_REGSET_END};
+static const struct vga_regset ark_h_display_regs[] = {{0x01, 0, 7}, {0x41, 6, 6}, VGA_REGSET_END};
+static const struct vga_regset ark_h_blank_start_regs[] = {{0x02, 0, 7}, {0x41, 5, 5}, VGA_REGSET_END};
+static const struct vga_regset ark_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7 }, VGA_REGSET_END};
+static const struct vga_regset ark_h_sync_start_regs[] = {{0x04, 0, 7}, {0x41, 4, 4}, VGA_REGSET_END};
+static const struct vga_regset ark_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
+
+static const struct vga_regset ark_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x40, 7, 7}, VGA_REGSET_END};
+static const struct vga_regset ark_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x40, 6, 6}, VGA_REGSET_END};
+static const struct vga_regset ark_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x40, 5, 5}, VGA_REGSET_END};
+// const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 6}, VGA_REGSET_END};
+static const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
+static const struct vga_regset ark_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x40, 4, 4}, VGA_REGSET_END};
+static const struct vga_regset ark_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
+
+static const struct vga_regset ark_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, VGA_REGSET_END};
+static const struct vga_regset ark_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x40, 0, 2}, VGA_REGSET_END};
+static const struct vga_regset ark_offset_regs[] = {{0x13, 0, 7}, {0x41, 3, 3}, VGA_REGSET_END};
+
+static const struct svga_timing_regs ark_timing_regs = {
+ ark_h_total_regs, ark_h_display_regs, ark_h_blank_start_regs,
+ ark_h_blank_end_regs, ark_h_sync_start_regs, ark_h_sync_end_regs,
+ ark_v_total_regs, ark_v_display_regs, ark_v_blank_start_regs,
+ ark_v_blank_end_regs, ark_v_sync_start_regs, ark_v_sync_end_regs,
+};
+
+
+/* ------------------------------------------------------------------------- */
+
+
+/* Module parameters */
+
+static char *mode = "640x480-8@60";
+
+#ifdef CONFIG_MTRR
+static int mtrr = 1;
+#endif
+
+MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
+
+module_param(mode, charp, 0444);
+MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
+
+#ifdef CONFIG_MTRR
+module_param(mtrr, int, 0444);
+MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
+#endif
+
+static int threshold = 4;
+
+module_param(threshold, int, 0644);
+MODULE_PARM_DESC(threshold, "FIFO threshold");
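+/* Usage example (illustrative): "modprobe arkfb mode=800x600-16@75 threshold=8"
+ overrides the default video mode and FIFO threshold at load time. */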
+
+
+/* ------------------------------------------------------------------------- */
+
+
+static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
+{
+ const u8 *font = map->data;
+ u8 __iomem *fb = (u8 __iomem *)info->screen_base;
+ int i, c;
+
+ if ((map->width != 8) || (map->height != 16) ||
+ (map->depth != 1) || (map->length != 256)) {
+ printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
+ "height %d, depth %d, length %d\n", info->node,
+ map->width, map->height, map->depth, map->length);
+ return;
+ }
+
+ fb += 2;
+ for (c = 0; c < map->length; c++) {
+ for (i = 0; i < map->height; i++) {
+ fb_writeb(font[i], &fb[i * 4]);
+ fb_writeb(font[i], &fb[i * 4 + (128 * 8)]);
+ }
+ fb += 128;
+
+ if ((c % 8) == 7)
+ fb += 128*8;
+
+ font += map->height;
+ }
+}
+
+static struct fb_tile_ops arkfb_tile_ops = {
+ .fb_settile = arkfb_settile,
+ .fb_tilecopy = svga_tilecopy,
+ .fb_tilefill = svga_tilefill,
+ .fb_tileblit = svga_tileblit,
+ .fb_tilecursor = svga_tilecursor,
+ .fb_get_tilemax = svga_get_tilemax,
+};
+
+
+/* ------------------------------------------------------------------------- */
+
+
+/* image data is MSB-first, fb structure is MSB-first too */
+static inline u32 expand_color(u32 c)
+{
+ return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
+}
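+
+/* Worked example (illustrative): for c = 0x5, the shifts leave one bit per
+ byte lane, 0x00010001, and the multiply by 0xFF widens each set bit into a
+ full byte mask, 0x00FF00FF. The blend in the imageblit below,
+ (val & fg) | (~val & bg), is then a bytewise mux: foreground where the
+ mask byte is 0xFF, background where it is 0x00. */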
+
+/* arkfb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
+static void arkfb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ u32 fg = expand_color(image->fg_color);
+ u32 bg = expand_color(image->bg_color);
+ const u8 *src1, *src;
+ u8 __iomem *dst1;
+ u32 __iomem *dst;
+ u32 val;
+ int x, y;
+
+ src1 = image->data;
+ dst1 = info->screen_base + (image->dy * info->fix.line_length)
+ + ((image->dx / 8) * 4);
+
+ for (y = 0; y < image->height; y++) {
+ src = src1;
+ dst = (u32 __iomem *) dst1;
+ for (x = 0; x < image->width; x += 8) {
+ val = *(src++) * 0x01010101;
+ val = (val & fg) | (~val & bg);
+ fb_writel(val, dst++);
+ }
+ src1 += image->width / 8;
+ dst1 += info->fix.line_length;
+ }
+
+}
+
+/* arkfb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
+static void arkfb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ u32 fg = expand_color(rect->color);
+ u8 __iomem *dst1;
+ u32 __iomem *dst;
+ int x, y;
+
+ dst1 = info->screen_base + (rect->dy * info->fix.line_length)
+ + ((rect->dx / 8) * 4);
+
+ for (y = 0; y < rect->height; y++) {
+ dst = (u32 __iomem *) dst1;
+ for (x = 0; x < rect->width; x += 8) {
+ fb_writel(fg, dst++);
+ }
+ dst1 += info->fix.line_length;
+ }
+
+}
+
+
+/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
+static inline u32 expand_pixel(u32 c)
+{
+ return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
+ ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
+}
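+
+/* Worked example (illustrative): expand_pixel(0x81) gathers the two set bits
+ into 0x01000010, and the multiply by 0xF widens them to nibbles,
+ 0x0F0000F0 - the source MSB ends up as the high nibble of the lowest
+ byte, matching the high-nibble-in-low-byte-first layout noted above. */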
+
+/* arkfb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
+static void arkfb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ u32 fg = image->fg_color * 0x11111111;
+ u32 bg = image->bg_color * 0x11111111;
+ const u8 *src1, *src;
+ u8 __iomem *dst1;
+ u32 __iomem *dst;
+ u32 val;
+ int x, y;
+
+ src1 = image->data;
+ dst1 = info->screen_base + (image->dy * info->fix.line_length)
+ + ((image->dx / 8) * 4);
+
+ for (y = 0; y < image->height; y++) {
+ src = src1;
+ dst = (u32 __iomem *) dst1;
+ for (x = 0; x < image->width; x += 8) {
+ val = expand_pixel(*(src++));
+ val = (val & fg) | (~val & bg);
+ fb_writel(val, dst++);
+ }
+ src1 += image->width / 8;
+ dst1 += info->fix.line_length;
+ }
+
+}
+
+static void arkfb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
+ && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
+ if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
+ arkfb_iplan_imageblit(info, image);
+ else
+ arkfb_cfb4_imageblit(info, image);
+ } else
+ cfb_imageblit(info, image);
+}
+
+static void arkfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ if ((info->var.bits_per_pixel == 4)
+ && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
+ && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
+ arkfb_iplan_fillrect(info, rect);
+ else
+ cfb_fillrect(info, rect);
+}
+
+
+/* ------------------------------------------------------------------------- */
+
+
+enum
+{
+ DAC_PSEUDO8_8,
+ DAC_RGB1555_8,
+ DAC_RGB0565_8,
+ DAC_RGB0888_8,
+ DAC_RGB8888_8,
+ DAC_PSEUDO8_16,
+ DAC_RGB1555_16,
+ DAC_RGB0565_16,
+ DAC_RGB0888_16,
+ DAC_RGB8888_16,
+ DAC_MAX
+};
+
+struct dac_ops {
+ int (*dac_get_mode)(struct dac_info *info);
+ int (*dac_set_mode)(struct dac_info *info, int mode);
+ int (*dac_get_freq)(struct dac_info *info, int channel);
+ int (*dac_set_freq)(struct dac_info *info, int channel, u32 freq);
+ void (*dac_release)(struct dac_info *info);
+};
+
+typedef void (*dac_read_regs_t)(void *data, u8 *code, int count);
+typedef void (*dac_write_regs_t)(void *data, u8 *code, int count);
+
+struct dac_info
+{
+ struct dac_ops *dacops;
+ dac_read_regs_t dac_read_regs;
+ dac_write_regs_t dac_write_regs;
+ void *data;
+};
+
+
+static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
+{
+ u8 code[2] = {reg, 0};
+ info->dac_read_regs(info->data, code, 1);
+ return code[1];
+}
+
+static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
+{
+ info->dac_read_regs(info->data, code, count);
+}
+
+static inline void dac_write_reg(struct dac_info *info, u8 reg, u8 val)
+{
+ u8 code[2] = {reg, val};
+ info->dac_write_regs(info->data, code, 1);
+}
+
+static inline void dac_write_regs(struct dac_info *info, u8 *code, int count)
+{
+ info->dac_write_regs(info->data, code, count);
+}
+
+static inline int dac_set_mode(struct dac_info *info, int mode)
+{
+ return info->dacops->dac_set_mode(info, mode);
+}
+
+static inline int dac_set_freq(struct dac_info *info, int channel, u32 freq)
+{
+ return info->dacops->dac_set_freq(info, channel, freq);
+}
+
+static inline void dac_release(struct dac_info *info)
+{
+ info->dacops->dac_release(info);
+}
+
+
+/* ------------------------------------------------------------------------- */
+
+
+/* ICS5342 DAC */
+
+struct ics5342_info
+{
+ struct dac_info dac;
+ u8 mode;
+};
+
+#define DAC_PAR(info) ((struct ics5342_info *) info)
+
+/* LSB is set to distinguish unused slots */
+static const u8 ics5342_mode_table[DAC_MAX] = {
+ [DAC_PSEUDO8_8] = 0x01, [DAC_RGB1555_8] = 0x21, [DAC_RGB0565_8] = 0x61,
+ [DAC_RGB0888_8] = 0x41, [DAC_PSEUDO8_16] = 0x11, [DAC_RGB1555_16] = 0x31,
+ [DAC_RGB0565_16] = 0x51, [DAC_RGB0888_16] = 0x91, [DAC_RGB8888_16] = 0x71
+};
+
+static int ics5342_set_mode(struct dac_info *info, int mode)
+{
+ u8 code;
+
+ if (mode >= DAC_MAX)
+ return -EINVAL;
+
+ code = ics5342_mode_table[mode];
+
+ if (! code)
+ return -EINVAL;
+
+ dac_write_reg(info, 6, code & 0xF0);
+ DAC_PAR(info)->mode = mode;
+
+ return 0;
+}
+
+static const struct svga_pll ics5342_pll = {3, 129, 3, 33, 0, 3,
+ 60000, 250000, 14318};
+
+/* pd4 - allow only posdivider 4 (r=2) */
+static const struct svga_pll ics5342_pll_pd4 = {3, 129, 3, 33, 2, 2,
+ 60000, 335000, 14318};
+
+/* 270 MHz should be the upper bound for the VCO clock according to the specs,
+ but that is too restrictive in the pd4 case */
+
+static int ics5342_set_freq(struct dac_info *info, int channel, u32 freq)
+{
+ u16 m, n, r;
+
+ /* only postdivider 4 (r=2) is valid in mode DAC_PSEUDO8_16 */
+ int rv = svga_compute_pll((DAC_PAR(info)->mode == DAC_PSEUDO8_16)
+ ? &ics5342_pll_pd4 : &ics5342_pll,
+ freq, &m, &n, &r, 0);
+
+ if (rv < 0) {
+ return -EINVAL;
+ } else {
+ u8 code[6] = {4, 3, 5, m-2, 5, (n-2) | (r << 5)};
+ dac_write_regs(info, code, 3);
+ return 0;
+ }
+}
+
+static void ics5342_release(struct dac_info *info)
+{
+ ics5342_set_mode(info, DAC_PSEUDO8_8);
+ kfree(info);
+}
+
+static struct dac_ops ics5342_ops = {
+ .dac_set_mode = ics5342_set_mode,
+ .dac_set_freq = ics5342_set_freq,
+ .dac_release = ics5342_release
+};
+
+
+static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
+{
+ struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
+
+ if (! info)
+ return NULL;
+
+ info->dacops = &ics5342_ops;
+ info->dac_read_regs = drr;
+ info->dac_write_regs = dwr;
+ info->data = data;
+ DAC_PAR(info)->mode = DAC_PSEUDO8_8; /* estimation */
+ return info;
+}
+
+
+/* ------------------------------------------------------------------------- */
+
+
+static unsigned short dac_regs[4] = {0x3c8, 0x3c9, 0x3c6, 0x3c7};
+
+static void ark_dac_read_regs(void *data, u8 *code, int count)
+{
+ u8 regval = vga_rseq(NULL, 0x1C);
+
+ while (count != 0)
+ {
+ vga_wseq(NULL, 0x1C, regval | ((code[0] & 4) ? 0x80 : 0));
+ code[1] = vga_r(NULL, dac_regs[code[0] & 3]);
+ count--;
+ code += 2;
+ }
+
+ vga_wseq(NULL, 0x1C, regval);
+}
+
+static void ark_dac_write_regs(void *data, u8 *code, int count)
+{
+ u8 regval = vga_rseq(NULL, 0x1C);
+
+ while (count != 0)
+ {
+ vga_wseq(NULL, 0x1C, regval | ((code[0] & 4) ? 0x80 : 0));
+ vga_w(NULL, dac_regs[code[0] & 3], code[1]);
+ count--;
+ code += 2;
+ }
+
+ vga_wseq(NULL, 0x1C, regval);
+}
+
+
+static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
+{
+ struct arkfb_info *par = info->par;
+ u8 regval;
+
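+ /* pixclock is a period in picoseconds, so 10^9 / pixclock is the dot
+ clock in kHz (e.g. a 40000 ps period gives 25000 kHz), matching the
+ kHz ranges used in the svga_pll tables above. */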
+ int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
+ if (rv < 0) {
+ printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+ return;
+ }
+
+ /* Set VGA misc register */
+ regval = vga_r(NULL, VGA_MIS_R);
+ vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
+}
+
+
+/* Open framebuffer */
+
+static int arkfb_open(struct fb_info *info, int user)
+{
+ struct arkfb_info *par = info->par;
+
+ mutex_lock(&(par->open_lock));
+ if (par->ref_count == 0) {
+ memset(&(par->state), 0, sizeof(struct vgastate));
+ par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
+ par->state.num_crtc = 0x60;
+ par->state.num_seq = 0x30;
+ save_vga(&(par->state));
+ }
+
+ par->ref_count++;
+ mutex_unlock(&(par->open_lock));
+
+ return 0;
+}
+
+/* Close framebuffer */
+
+static int arkfb_release(struct fb_info *info, int user)
+{
+ struct arkfb_info *par = info->par;
+
+ mutex_lock(&(par->open_lock));
+ if (par->ref_count == 0) {
+ mutex_unlock(&(par->open_lock));
+ return -EINVAL;
+ }
+
+ if (par->ref_count == 1) {
+ restore_vga(&(par->state));
+ dac_set_mode(par->dac, DAC_PSEUDO8_8);
+ }
+
+ par->ref_count--;
+ mutex_unlock(&(par->open_lock));
+
+ return 0;
+}
+
+/* Validate passed in var */
+
+static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ int rv, mem, step;
+
+ /* Find appropriate format */
+ rv = svga_match_format (arkfb_formats, var, NULL);
+ if (rv < 0)
+ {
+ printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+ return rv;
+ }
+
+ /* Do not allow the real resolution to be larger than the virtual one */
+ if (var->xres > var->xres_virtual)
+ var->xres_virtual = var->xres;
+
+ if (var->yres > var->yres_virtual)
+ var->yres_virtual = var->yres;
+
+ /* Round up xres_virtual to have proper alignment of lines */
+ step = arkfb_formats[rv].xresstep - 1;
+ var->xres_virtual = (var->xres_virtual+step) & ~step;
+
+
+ /* Check whether there is enough memory */
+ mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
+ if (mem > info->screen_size)
+ {
+ printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+ return -EINVAL;
+ }
+
+ rv = svga_check_timings (&ark_timing_regs, var, info->node);
+ if (rv < 0)
+ {
+ printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+ return rv;
+ }
+
+ /* Interlaced mode is broken */
+ if (var->vmode & FB_VMODE_INTERLACED)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Set video mode from par */
+
+static int arkfb_set_par(struct fb_info *info)
+{
+ struct arkfb_info *par = info->par;
+ u32 value, mode, hmul, hdiv, offset_value, screen_size;
+ u32 bpp = info->var.bits_per_pixel;
+ u8 regval;
+
+ if (bpp != 0) {
+ info->fix.ypanstep = 1;
+ info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
+
+ info->flags &= ~FBINFO_MISC_TILEBLITTING;
+ info->tileops = NULL;
+
+ /* in 4 bpp only 8-pixel-wide tiles are supported, any tile size otherwise */
+ info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
+ info->pixmap.blit_y = ~(u32)0;
+
+ offset_value = (info->var.xres_virtual * bpp) / 64;
+ screen_size = info->var.yres_virtual * info->fix.line_length;
+ } else {
+ info->fix.ypanstep = 16;
+ info->fix.line_length = 0;
+
+ info->flags |= FBINFO_MISC_TILEBLITTING;
+ info->tileops = &arkfb_tile_ops;
+
+ /* supports 8x16 tiles only */
+ info->pixmap.blit_x = 1 << (8 - 1);
+ info->pixmap.blit_y = 1 << (16 - 1);
+
+ offset_value = info->var.xres_virtual / 16;
+ screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
+ }
+
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+
+ /* Unlock registers */
+ svga_wcrt_mask(0x11, 0x00, 0x80);
+
+ /* Blank screen and turn off sync */
+ svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(0x17, 0x00, 0x80);
+
+ /* Set default values */
+ svga_set_default_gfx_regs();
+ svga_set_default_atc_regs();
+ svga_set_default_seq_regs();
+ svga_set_default_crt_regs();
+ svga_wcrt_multi(ark_line_compare_regs, 0xFFFFFFFF);
+ svga_wcrt_multi(ark_start_address_regs, 0);
+
+ /* ARK specific initialization */
+ svga_wseq_mask(0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
+ svga_wseq_mask(0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */
+
+ vga_wseq(NULL, 0x13, info->fix.smem_start >> 16);
+ vga_wseq(NULL, 0x14, info->fix.smem_start >> 24);
+ vga_wseq(NULL, 0x15, 0);
+ vga_wseq(NULL, 0x16, 0);
+
+ /* Set the FIFO threshold register */
+ /* It is a fascinating way to store a 5-bit value in an 8-bit register */
+ regval = 0x10 | ((threshold & 0x0E) >> 1) | (threshold & 0x01) << 7 | (threshold & 0x10) << 1;
+ vga_wseq(NULL, 0x18, regval);
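+ /* Worked example: bit t0 of the 5-bit threshold lands at bit 7, t3..t1
+ at bits 2..0, t4 at bit 5, and bit 4 is always set; the default
+ threshold = 4 (0b00100) gives 0x10 | 0x02 = 0x12. */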
+
+ /* Set the offset register */
+ pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
+ svga_wcrt_multi(ark_offset_regs, offset_value);
+
+ /* fix for hi-res textmode */
+ svga_wcrt_mask(0x40, 0x08, 0x08);
+
+ if (info->var.vmode & FB_VMODE_DOUBLE)
+ svga_wcrt_mask(0x09, 0x80, 0x80);
+ else
+ svga_wcrt_mask(0x09, 0x00, 0x80);
+
+ if (info->var.vmode & FB_VMODE_INTERLACED)
+ svga_wcrt_mask(0x44, 0x04, 0x04);
+ else
+ svga_wcrt_mask(0x44, 0x00, 0x04);
+
+ hmul = 1;
+ hdiv = 1;
+ mode = svga_match_format(arkfb_formats, &(info->var), &(info->fix));
+
+ /* Set mode-specific register values */
+ switch (mode) {
+ case 0:
+ pr_debug("fb%d: text mode\n", info->node);
+ svga_set_textmode_vga_regs();
+
+ vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
+ svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
+ dac_set_mode(par->dac, DAC_PSEUDO8_8);
+
+ break;
+ case 1:
+ pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+ vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
+
+ vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
+ svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
+ dac_set_mode(par->dac, DAC_PSEUDO8_8);
+ break;
+ case 2:
+ pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+
+ vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
+ svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
+ dac_set_mode(par->dac, DAC_PSEUDO8_8);
+ break;
+ case 3:
+ pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+
+ vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode */
+
+ if (info->var.pixclock > 20000) {
+ pr_debug("fb%d: not using multiplex\n", info->node);
+ svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
+ dac_set_mode(par->dac, DAC_PSEUDO8_8);
+ } else {
+ pr_debug("fb%d: using multiplex\n", info->node);
+ svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ dac_set_mode(par->dac, DAC_PSEUDO8_16);
+ hdiv = 2;
+ }
+ break;
+ case 4:
+ pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
+
+ vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
+ svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ dac_set_mode(par->dac, DAC_RGB1555_16);
+ break;
+ case 5:
+ pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+
+ vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
+ svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ dac_set_mode(par->dac, DAC_RGB0565_16);
+ break;
+ case 6:
+ pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+
+ vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode ??? */
+ svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ dac_set_mode(par->dac, DAC_RGB0888_16);
+ hmul = 3;
+ hdiv = 2;
+ break;
+ case 7:
+ pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
+
+ vga_wseq(NULL, 0x11, 0x1E); /* 32bpp accel mode */
+ svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
+ dac_set_mode(par->dac, DAC_RGB8888_16);
+ hmul = 2;
+ break;
+ default:
+ printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
+ return -EINVAL;
+ }
+
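+ /* hmul/hdiv scale the pixel clock and horizontal timings for modes
+  * where the transfer rate differs from the pixel rate, e.g. 24 bpp
+  * pushes 3 bytes per pixel over the 16-bit path (hmul/hdiv = 3/2) */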
+ ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
+ svga_set_timings(&ark_timing_regs, &(info->var), hmul, hdiv,
+ (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
+ (info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
+ hmul, info->node);
+
+ /* Set interlaced mode start/end register */
+ value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
+ value = ((value * hmul / hdiv) / 8) - 5;
+ vga_wcrt(NULL, 0x42, (value + 1) / 2);
+
+ memset_io(info->screen_base, 0x00, screen_size);
+ /* Device and screen back on */
+ svga_wcrt_mask(0x17, 0x80, 0x80);
+ svga_wseq_mask(0x01, 0x00, 0x20);
+
+ return 0;
+}
+
+/* Set a colour register */
+
+static int arkfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *fb)
+{
+ switch (fb->var.bits_per_pixel) {
+ case 0:
+ case 4:
+ if (regno >= 16)
+ return -EINVAL;
+
+ if ((fb->var.bits_per_pixel == 4) &&
+ (fb->var.nonstd == 0)) {
+ outb(0xF0, VGA_PEL_MSK);
+ outb(regno*16, VGA_PEL_IW);
+ } else {
+ outb(0x0F, VGA_PEL_MSK);
+ outb(regno, VGA_PEL_IW);
+ }
+ outb(red >> 10, VGA_PEL_D);
+ outb(green >> 10, VGA_PEL_D);
+ outb(blue >> 10, VGA_PEL_D);
+ break;
+ case 8:
+ if (regno >= 256)
+ return -EINVAL;
+
+ outb(0xFF, VGA_PEL_MSK);
+ outb(regno, VGA_PEL_IW);
+ outb(red >> 10, VGA_PEL_D);
+ outb(green >> 10, VGA_PEL_D);
+ outb(blue >> 10, VGA_PEL_D);
+ break;
+ case 16:
+ if (regno >= 16)
+ return 0;
+
+ if (fb->var.green.length == 5)
+ ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
+ ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
+ else if (fb->var.green.length == 6)
+ ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
+ ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
+ else
+ return -EINVAL;
+ break;
+ case 24:
+ case 32:
+ if (regno >= 16)
+ return 0;
+
+ ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
+ (green & 0xFF00) | ((blue & 0xFF00) >> 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set the display blanking state */
+
+static int arkfb_blank(int blank_mode, struct fb_info *info)
+{
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ pr_debug("fb%d: unblank\n", info->node);
+ svga_wseq_mask(0x01, 0x00, 0x20);
+ svga_wcrt_mask(0x17, 0x80, 0x80);
+ break;
+ case FB_BLANK_NORMAL:
+ pr_debug("fb%d: blank\n", info->node);
+ svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(0x17, 0x80, 0x80);
+ break;
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_VSYNC_SUSPEND:
+ pr_debug("fb%d: sync down\n", info->node);
+ svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(0x17, 0x00, 0x80);
+ break;
+ }
+ return 0;
+}
+
+
+/* Pan the display */
+
+static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ unsigned int offset;
+
+ /* Calculate the offset */
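+ /* The final shift converts the byte offset into display start-address
+  * units; the granularity presumably differs between 4 bpp and the
+  * other modes, hence the two shift counts below */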
+ if (var->bits_per_pixel == 0) {
+ offset = (var->yoffset / 16) * (var->xres_virtual / 2) + (var->xoffset / 2);
+ offset = offset >> 2;
+ } else {
+ offset = (var->yoffset * info->fix.line_length) +
+ (var->xoffset * var->bits_per_pixel / 8);
+ offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 3);
+ }
+
+ /* Set the offset */
+ svga_wcrt_multi(ark_start_address_regs, offset);
+
+ return 0;
+}
+
+
+/* ------------------------------------------------------------------------- */
+
+
+/* Frame buffer operations */
+
+static struct fb_ops arkfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = arkfb_open,
+ .fb_release = arkfb_release,
+ .fb_check_var = arkfb_check_var,
+ .fb_set_par = arkfb_set_par,
+ .fb_setcolreg = arkfb_setcolreg,
+ .fb_blank = arkfb_blank,
+ .fb_pan_display = arkfb_pan_display,
+ .fb_fillrect = arkfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = arkfb_imageblit,
+ .fb_get_caps = svga_get_caps,
+};
+
+
+/* ------------------------------------------------------------------------- */
+
+
+/* PCI probe */
+static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct fb_info *info;
+ struct arkfb_info *par;
+ int rc;
+ u8 regval;
+
+ /* Ignore secondary VGA device because there is no VGA arbitration */
+ if (! svga_primary_device(dev)) {
+ dev_info(&(dev->dev), "ignoring secondary device\n");
+ return -ENODEV;
+ }
+
+ /* Allocate and fill driver data structure */
+ info = framebuffer_alloc(sizeof(struct arkfb_info), NULL);
+ if (! info) {
+ dev_err(&(dev->dev), "cannot allocate memory\n");
+ return -ENOMEM;
+ }
+
+ par = info->par;
+ mutex_init(&par->open_lock);
+
+ info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
+ info->fbops = &arkfb_ops;
+
+ /* Prepare PCI device */
+ rc = pci_enable_device(dev);
+ if (rc < 0) {
+ dev_err(&(dev->dev), "cannot enable PCI device\n");
+ goto err_enable_device;
+ }
+
+ rc = pci_request_regions(dev, "arkfb");
+ if (rc < 0) {
+ dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
+ goto err_request_regions;
+ }
+
+ par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
+ if (! par->dac) {
+ rc = -ENOMEM;
+ dev_err(&(dev->dev), "RAMDAC initialization failed\n");
+ goto err_dac;
+ }
+
+ info->fix.smem_start = pci_resource_start(dev, 0);
+ info->fix.smem_len = pci_resource_len(dev, 0);
+
+ /* Map physical IO memory address into kernel space */
+ info->screen_base = pci_iomap(dev, 0, 0);
+ if (! info->screen_base) {
+ rc = -ENOMEM;
+ dev_err(&(dev->dev), "iomap for framebuffer failed\n");
+ goto err_iomap;
+ }
+
+ /* FIXME get memsize */
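+ /* SR10 bits 7..6 encode the memory size as (1 << (SR10 >> 6)) MB,
+  * i.e. 1, 2, 4 or 8 MB */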
+ regval = vga_rseq(NULL, 0x10);
+ info->screen_size = (1 << (regval >> 6)) << 20;
+ info->fix.smem_len = info->screen_size;
+
+ strcpy(info->fix.id, "ARK 2000PV");
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ info->fix.ypanstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->pseudo_palette = (void*) (par->pseudo_palette);
+
+ /* Prepare startup mode */
+ rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
+ if (! ((rc == 1) || (rc == 2))) {
+ rc = -EINVAL;
+ dev_err(&(dev->dev), "mode %s not found\n", mode);
+ goto err_find_mode;
+ }
+
+ rc = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (rc < 0) {
+ dev_err(&(dev->dev), "cannot allocate colormap\n");
+ goto err_alloc_cmap;
+ }
+
+ rc = register_framebuffer(info);
+ if (rc < 0) {
+ dev_err(&(dev->dev), "cannot register framebuffer\n");
+ goto err_reg_fb;
+ }
+
+ printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
+ pci_name(dev), info->fix.smem_len >> 20);
+
+ /* Record a reference to the driver data */
+ pci_set_drvdata(dev, info);
+
+#ifdef CONFIG_MTRR
+ if (mtrr) {
+ par->mtrr_reg = -1;
+ par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
+ }
+#endif
+
+ return 0;
+
+ /* Error handling */
+err_reg_fb:
+ fb_dealloc_cmap(&info->cmap);
+err_alloc_cmap:
+err_find_mode:
+ pci_iounmap(dev, info->screen_base);
+err_iomap:
+ dac_release(par->dac);
+err_dac:
+ pci_release_regions(dev);
+err_request_regions:
+/* pci_disable_device(dev); */
+err_enable_device:
+ framebuffer_release(info);
+ return rc;
+}
+
+/* PCI remove */
+
+static void __devexit ark_pci_remove(struct pci_dev *dev)
+{
+ struct fb_info *info = pci_get_drvdata(dev);
+
+ if (info) {
+ struct arkfb_info *par = info->par;
+
+#ifdef CONFIG_MTRR
+ if (par->mtrr_reg >= 0) {
+ mtrr_del(par->mtrr_reg, 0, 0);
+ par->mtrr_reg = -1;
+ }
+#endif
+
+ dac_release(par->dac);
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+
+ pci_iounmap(dev, info->screen_base);
+ pci_release_regions(dev);
+/* pci_disable_device(dev); */
+
+ pci_set_drvdata(dev, NULL);
+ framebuffer_release(info);
+ }
+}
+
+
+#ifdef CONFIG_PM
+/* PCI suspend */
+
+static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
+{
+ struct fb_info *info = pci_get_drvdata(dev);
+ struct arkfb_info *par = info->par;
+
+ dev_info(&(dev->dev), "suspend\n");
+
+ acquire_console_sem();
+ mutex_lock(&(par->open_lock));
+
+ if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
+ mutex_unlock(&(par->open_lock));
+ release_console_sem();
+ return 0;
+ }
+
+ fb_set_suspend(info, 1);
+
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+
+ mutex_unlock(&(par->open_lock));
+ release_console_sem();
+
+ return 0;
+}
+
+
+/* PCI resume */
+
+static int ark_pci_resume (struct pci_dev* dev)
+{
+ struct fb_info *info = pci_get_drvdata(dev);
+ struct arkfb_info *par = info->par;
+
+ dev_info(&(dev->dev), "resume\n");
+
+ acquire_console_sem();
+ mutex_lock(&(par->open_lock));
+
+ if (par->ref_count == 0) {
+ mutex_unlock(&(par->open_lock));
+ release_console_sem();
+ return 0;
+ }
+
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+
+ if (pci_enable_device(dev))
+ goto fail;
+
+ pci_set_master(dev);
+
+ arkfb_set_par(info);
+ fb_set_suspend(info, 0);
+
+fail:
+ mutex_unlock(&(par->open_lock));
+ release_console_sem();
+ return 0;
+}
+#else
+#define ark_pci_suspend NULL
+#define ark_pci_resume NULL
+#endif /* CONFIG_PM */
+
+/* List of boards that we are trying to support */
+
+static struct pci_device_id ark_devices[] __devinitdata = {
+ {PCI_DEVICE(0xEDD8, 0xA099)},
+ {0, 0, 0, 0, 0, 0, 0}
+};
+
+
+MODULE_DEVICE_TABLE(pci, ark_devices);
+
+static struct pci_driver arkfb_pci_driver = {
+ .name = "arkfb",
+ .id_table = ark_devices,
+ .probe = ark_pci_probe,
+ .remove = __devexit_p(ark_pci_remove),
+ .suspend = ark_pci_suspend,
+ .resume = ark_pci_resume,
+};
+
+/* Cleanup */
+
+static void __exit arkfb_cleanup(void)
+{
+ pr_debug("arkfb: cleaning up\n");
+ pci_unregister_driver(&arkfb_pci_driver);
+}
+
+/* Driver Initialisation */
+
+static int __init arkfb_init(void)
+{
+
+#ifndef MODULE
+ char *option = NULL;
+
+ if (fb_get_options("arkfb", &option))
+ return -ENODEV;
+
+ if (option && *option)
+ mode = option;
+#endif
+
+ pr_debug("arkfb: initializing\n");
+ return pci_register_driver(&arkfb_pci_driver);
+}
+
+module_init(arkfb_init);
+module_exit(arkfb_cleanup);
diff --git a/drivers/video/console/softcursor.c b/drivers/video/console/softcursor.c
index f577bd80e02..03cfb7ac573 100644
--- a/drivers/video/console/softcursor.c
+++ b/drivers/video/console/softcursor.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/video/softcursor.c
+ * linux/drivers/video/console/softcursor.c
*
* Generic software cursor for frame buffer devices
*
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 08d4e11d912..38c2e2558f5 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1236,6 +1236,10 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+#elif defined(__avr32__)
+ vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
+ & ~_PAGE_CACHABLE)
+ | (_PAGE_BUFFER | _PAGE_DIRTY));
#elif defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 7e760197cf2..1a7d7789d87 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1717,7 +1717,7 @@ static int __devinit i810_alloc_agp_mem(struct fb_info *info)
* @info: pointer to device specific info structure
*
* DESCRIPTION:
- * Sets the the user monitor's horizontal and vertical
+ * Sets the user monitor's horizontal and vertical
* frequency limits
*/
static void __devinit i810_init_monspecs(struct fb_info *info)
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index a5690a5f29d..9445cdb759b 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -72,7 +72,7 @@
* (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
*
* (following author is not in any relation with this code, but his ideas
- * were used when writting this driver)
+ * were used when writing this driver)
*
* FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
*
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index a5c825d9946..c57aaadf410 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -70,7 +70,7 @@
* (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
*
* (following author is not in any relation with this code, but his ideas
- * were used when writting this driver)
+ * were used when writing this driver)
*
* FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
*
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index cb2aa402ddf..c8559a756b7 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -93,7 +93,7 @@
* (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
*
* (following author is not in any relation with this code, but his ideas
- * were used when writting this driver)
+ * were used when writing this driver)
*
* FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
*
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index 18886b629cb..5948e54b9ef 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -78,7 +78,7 @@
* (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
*
* (following author is not in any relation with this code, but his ideas
- * were used when writting this driver)
+ * were used when writing this driver)
*
* FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
*
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index f297c7b14a4..c627955aa12 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -149,8 +149,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
pll = NV_RD32(par->PMC, 0x4024);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
- if (((par->Chipset & 0xfff0) == 0x0290) ||
- ((par->Chipset & 0xfff0) == 0x0390)) {
+ if (((par->Chipset & 0xfff0) == 0x0290) ||
+     ((par->Chipset & 0xfff0) == 0x0390) ||
+     ((par->Chipset & 0xfff0) == 0x02E0)) {
MB = 1;
NB = 1;
} else {
@@ -963,6 +962,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
+ ((par->Chipset & 0xfff0) == 0x02E0) ||
((par->Chipset & 0xfff0) == 0x0290))
regions = 15;
for(i = 0; i < regions; i++) {
@@ -1275,6 +1275,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
0x00100000);
break;
case 0x0090:
+ case 0x02E0:
case 0x0290:
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC, 0x0608) |
@@ -1352,6 +1353,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
} else {
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
+ ((par->Chipset & 0xfff0) == 0x02E0) ||
((par->Chipset & 0xfff0) == 0x0290)) {
for (i = 0; i < 60; i++) {
NV_WR32(par->PGRAPH,
@@ -1403,6 +1405,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
} else {
if ((par->Chipset & 0xfff0) == 0x0090 ||
(par->Chipset & 0xfff0) == 0x01D0 ||
+ (par->Chipset & 0xfff0) == 0x02E0 ||
(par->Chipset & 0xfff0) == 0x0290) {
NV_WR32(par->PGRAPH, 0x0DF0,
NV_RD32(par->PFB, 0x0200));
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 7c36b5fe582..f85edf084da 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1243,6 +1243,7 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
case 0x0140: /* GeForce 6600 */
case 0x0160: /* GeForce 6200 */
case 0x01D0: /* GeForce 7200, 7300, 7400 */
+ case 0x02E0: /* GeForce 7300 GT */
case 0x0090: /* GeForce 7800 */
case 0x0210: /* GeForce 6800 */
case 0x0220: /* GeForce 6200 */
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 756fafb41d7..d11735895a0 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -796,23 +796,6 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-/* Get capabilities of accelerator based on the mode */
-
-static void s3fb_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
- struct fb_var_screeninfo *var)
-{
- if (var->bits_per_pixel == 0) {
- /* can only support 256 8x16 bitmap */
- caps->x = 1 << (8 - 1);
- caps->y = 1 << (16 - 1);
- caps->len = 256;
- } else {
- caps->x = ~(u32)0;
- caps->y = ~(u32)0;
- caps->len = ~(u32)0;
- }
-}
-
/* ------------------------------------------------------------------------- */
/* Frame buffer operations */
@@ -829,7 +812,7 @@ static struct fb_ops s3fb_ops = {
.fb_fillrect = s3fb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = s3fb_imageblit,
- .fb_get_caps = s3fb_get_caps,
+ .fb_get_caps = svga_get_caps,
};
/* ------------------------------------------------------------------------- */
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 842b5cd054c..836a612af97 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -14,7 +14,7 @@
* of it.
*
* First the roles of struct fb_info and struct display have changed. Struct
- * display will go away. The way the the new framebuffer console code will
+ * display will go away. The way the new framebuffer console code will
* work is that it will act to translate data about the tty/console in
* struct vc_data to data in a device independent way in struct fb_info. Then
* various functions in struct fb_ops will be called to store the device
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index 079cdc911e4..25df928d37d 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -347,6 +347,23 @@ int svga_get_tilemax(struct fb_info *info)
return 256;
}
+/* Get capabilities of accelerator based on the mode */
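+/* caps->x and caps->y are bitmaps; bit n set means a blit width/height
+ * of n + 1 pixels is supported, so 1 << (8 - 1) allows 8-pixel cells only */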
+
+void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
+ struct fb_var_screeninfo *var)
+{
+ if (var->bits_per_pixel == 0) {
+ /* can only support 256 8x16 bitmap */
+ caps->x = 1 << (8 - 1);
+ caps->y = 1 << (16 - 1);
+ caps->len = 256;
+ } else {
+ caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
+ caps->y = ~(u32)0;
+ caps->len = ~(u32)0;
+ }
+}
+EXPORT_SYMBOL(svga_get_caps);
/* ------------------------------------------------------------------------- */
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
new file mode 100644
index 00000000000..5e9755e464a
--- /dev/null
+++ b/drivers/video/vt8623fb.c
@@ -0,0 +1,927 @@
+/*
+ * linux/drivers/video/vt8623fb.c - fbdev driver for
+ * integrated graphic core in VIA VT8623 [CLE266] chipset
+ *
+ * Copyright (c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Code is based on s3fb, some parts are from David Boucher's viafb
+ * (http://davesdomain.org.uk/viafb/)
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/svga.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/console.h> /* needed for acquire_console_sem() and release_console_sem() */
+#include <video/vga.h>
+
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+struct vt8623fb_info {
+ char __iomem *mmio_base;
+ int mtrr_reg;
+ struct vgastate state;
+ struct mutex open_lock;
+ unsigned int ref_count;
+ u32 pseudo_palette[16];
+};
+
+
+
+/* ------------------------------------------------------------------------- */
+
+static const struct svga_fb_format vt8623fb_formats[] = {
+ { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
+ FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP8, FB_VISUAL_PSEUDOCOLOR, 16, 16},
+ { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 16, 16},
+ { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
+ FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 16, 16},
+ { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
+/* {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4}, */
+ {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
+ {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
+ FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
+ SVGA_FORMAT_END
+};
+
+static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
+ 60000, 300000, 14318};
+
+/* CRT timing register sets */
+
+struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
+struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
+struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
+struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
+struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
+struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
+
+struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
+struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
+struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
+struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
+struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
+struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
+
+struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
+struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
+struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
+struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
+
+struct svga_timing_regs vt8623_timing_regs = {
+ vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
+ vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
+ vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
+ vt8623_v_blank_end_regs, vt8623_v_sync_start_regs, vt8623_v_sync_end_regs,
+};
+
+
+/* ------------------------------------------------------------------------- */
+
+
+/* Module parameters */
+
+static char *mode = "640x480-8@60";
+
+#ifdef CONFIG_MTRR
+static int mtrr = 1;
+#endif
+
+MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("fbdev driver for integrated graphics core in VIA VT8623 [CLE266]");
+
+module_param(mode, charp, 0644);
+MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
+
+#ifdef CONFIG_MTRR
+module_param(mtrr, int, 0444);
+MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
+#endif
+
+
+/* ------------------------------------------------------------------------- */
+
+
+static struct fb_tile_ops vt8623fb_tile_ops = {
+ .fb_settile = svga_settile,
+ .fb_tilecopy = svga_tilecopy,
+ .fb_tilefill = svga_tilefill,
+ .fb_tileblit = svga_tileblit,
+ .fb_tilecursor = svga_tilecursor,
+ .fb_get_tilemax = svga_get_tilemax,
+};
+
+
+/* ------------------------------------------------------------------------- */
+
+
+/* image data is MSB-first, fb structure is MSB-first too */
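+/* Each of the four plane bits of c is placed in its own byte, and the
+ * multiply by 0xFF widens every set bit into a full 0xFF byte mask,
+ * e.g. expand_color(0x5) == 0x00FF00FF */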
+static inline u32 expand_color(u32 c)
+{
+ return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
+}
+
+/* vt8623fb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
+static void vt8623fb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ u32 fg = expand_color(image->fg_color);
+ u32 bg = expand_color(image->bg_color);
+ const u8 *src1, *src;
+ u8 __iomem *dst1;
+ u32 __iomem *dst;
+ u32 val;
+ int x, y;
+
+ src1 = image->data;
+ dst1 = info->screen_base + (image->dy * info->fix.line_length)
+ + ((image->dx / 8) * 4);
+
+ for (y = 0; y < image->height; y++) {
+ src = src1;
+ dst = (u32 __iomem *) dst1;
+ for (x = 0; x < image->width; x += 8) {
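+ /* replicate the 8-pixel bitmap into all four bytes, then select
+  * fg bits where the mask is set and bg bits where it is clear */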
+ val = *(src++) * 0x01010101;
+ val = (val & fg) | (~val & bg);
+ fb_writel(val, dst++);
+ }
+ src1 += image->width / 8;
+ dst1 += info->fix.line_length;
+ }
+}
+
+/* vt8623fb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
+static void vt8623fb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ u32 fg = expand_color(rect->color);
+ u8 __iomem *dst1;
+ u32 __iomem *dst;
+ int x, y;
+
+ dst1 = info->screen_base + (rect->dy * info->fix.line_length)
+ + ((rect->dx / 8) * 4);
+
+ for (y = 0; y < rect->height; y++) {
+ dst = (u32 __iomem *) dst1;
+ for (x = 0; x < rect->width; x += 8) {
+ fb_writel(fg, dst++);
+ }
+ dst1 += info->fix.line_length;
+ }
+}
+
+
+/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
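+/* Each of the eight pixel bits is moved to the low bit of its target
+ * nibble, and the multiply by 0xF widens it to a full nibble, giving the
+ * high-nibble-in-low-byte ordering noted above */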
+static inline u32 expand_pixel(u32 c)
+{
+ return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
+ ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
+}
+
+/* vt8623fb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
+static void vt8623fb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ u32 fg = image->fg_color * 0x11111111;
+ u32 bg = image->bg_color * 0x11111111;
+ const u8 *src1, *src;
+ u8 __iomem *dst1;
+ u32 __iomem *dst;
+ u32 val;
+ int x, y;
+
+ src1 = image->data;
+ dst1 = info->screen_base + (image->dy * info->fix.line_length)
+ + ((image->dx / 8) * 4);
+
+ for (y = 0; y < image->height; y++) {
+ src = src1;
+ dst = (u32 __iomem *) dst1;
+ for (x = 0; x < image->width; x += 8) {
+ val = expand_pixel(*(src++));
+ val = (val & fg) | (~val & bg);
+ fb_writel(val, dst++);
+ }
+ src1 += image->width / 8;
+ dst1 += info->fix.line_length;
+ }
+}
+
+static void vt8623fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
+ && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
+ if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
+ vt8623fb_iplan_imageblit(info, image);
+ else
+ vt8623fb_cfb4_imageblit(info, image);
+ } else
+ cfb_imageblit(info, image);
+}
+
+static void vt8623fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ if ((info->var.bits_per_pixel == 4)
+ && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
+ && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
+ vt8623fb_iplan_fillrect(info, rect);
+ else
+ cfb_fillrect(info, rect);
+}
+
+
+/* ------------------------------------------------------------------------- */
+
+
+static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
+{
+ u16 m, n, r;
+ u8 regval;
+ int rv;
+
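+ /* var.pixclock is the pixel period in ps, so 1000000000 / pixclock
+  * is the requested dot clock in kHz */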
+ rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
+ if (rv < 0) {
+ printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
+ return;
+ }
+
+ /* Set VGA misc register */
+ regval = vga_r(NULL, VGA_MIS_R);
+ vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
+
+ /* Set clock registers */
+ vga_wseq(NULL, 0x46, (n | (r << 6)));
+ vga_wseq(NULL, 0x47, m);
+
+ udelay(1000);
+
+ /* PLL reset */
+ svga_wseq_mask(0x40, 0x02, 0x02);
+ svga_wseq_mask(0x40, 0x00, 0x02);
+}
+
+
+static int vt8623fb_open(struct fb_info *info, int user)
+{
+ struct vt8623fb_info *par = info->par;
+
+ mutex_lock(&(par->open_lock));
+ if (par->ref_count == 0) {
+ memset(&(par->state), 0, sizeof(struct vgastate));
+ par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
+ par->state.num_crtc = 0xA2;
+ par->state.num_seq = 0x50;
+ save_vga(&(par->state));
+ }
+
+ par->ref_count++;
+ mutex_unlock(&(par->open_lock));
+
+ return 0;
+}
+
+static int vt8623fb_release(struct fb_info *info, int user)
+{
+ struct vt8623fb_info *par = info->par;
+
+ mutex_lock(&(par->open_lock));
+ if (par->ref_count == 0) {
+ mutex_unlock(&(par->open_lock));
+ return -EINVAL;
+ }
+
+ if (par->ref_count == 1)
+ restore_vga(&(par->state));
+
+ par->ref_count--;
+ mutex_unlock(&(par->open_lock));
+
+ return 0;
+}
+
+static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ int rv, mem, step;
+
+ /* Find appropriate format */
+ rv = svga_match_format (vt8623fb_formats, var, NULL);
+ if (rv < 0)
+ {
+ printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
+ return rv;
+ }
+
+ /* Do not allow the real resolution to be larger than the virtual one */
+ if (var->xres > var->xres_virtual)
+ var->xres_virtual = var->xres;
+
+ if (var->yres > var->yres_virtual)
+ var->yres_virtual = var->yres;
+
+ /* Round up xres_virtual to have proper alignment of lines */
+ step = vt8623fb_formats[rv].xresstep - 1;
+ var->xres_virtual = (var->xres_virtual+step) & ~step;
+
+ /* Check whether we have enough memory */
+ mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
+ if (mem > info->screen_size)
+ {
+ printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested, %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
+ return -EINVAL;
+ }
+
+ /* Text mode is limited to 256 kB of memory */
+ if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
+ {
+ printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
+ return -EINVAL;
+ }
+
+ rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
+ if (rv < 0)
+ {
+ printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
+ return rv;
+ }
+
+ /* Interlaced mode not supported */
+ if (var->vmode & FB_VMODE_INTERLACED)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static int vt8623fb_set_par(struct fb_info *info)
+{
+ u32 mode, offset_value, fetch_value, screen_size;
+ u32 bpp = info->var.bits_per_pixel;
+
+ if (bpp != 0) {
+ info->fix.ypanstep = 1;
+ info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
+
+ info->flags &= ~FBINFO_MISC_TILEBLITTING;
+ info->tileops = NULL;
+
+ /* in 4 bpp, only 8-pixel wide blits are supported; any width otherwise */
+ info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
+ info->pixmap.blit_y = ~(u32)0;
+
+ offset_value = (info->var.xres_virtual * bpp) / 64;
+ fetch_value = ((info->var.xres * bpp) / 128) + 4;
+
+ if (bpp == 4)
+ fetch_value = (info->var.xres / 8) + 8; /* + 0 is OK */
+
+ screen_size = info->var.yres_virtual * info->fix.line_length;
+ } else {
+ info->fix.ypanstep = 16;
+ info->fix.line_length = 0;
+
+ info->flags |= FBINFO_MISC_TILEBLITTING;
+ info->tileops = &vt8623fb_tile_ops;
+
+ /* supports 8x16 tiles only */
+ info->pixmap.blit_x = 1 << (8 - 1);
+ info->pixmap.blit_y = 1 << (16 - 1);
+
+ offset_value = info->var.xres_virtual / 16;
+ fetch_value = (info->var.xres / 8) + 8;
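+ /* one 8x16 character cell occupies 2 bytes, hence x * y / 64 */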
+ screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
+ }
+
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+
+ /* Unlock registers */
+ svga_wseq_mask(0x10, 0x01, 0x01);
+ svga_wcrt_mask(0x11, 0x00, 0x80);
+ svga_wcrt_mask(0x47, 0x00, 0x01);
+
+ /* Device, screen and sync off */
+ svga_wseq_mask(0x01, 0x20, 0x20);
+ svga_wcrt_mask(0x36, 0x30, 0x30);
+ svga_wcrt_mask(0x17, 0x00, 0x80);
+
+ /* Set default values */
+ svga_set_default_gfx_regs();
+ svga_set_default_atc_regs();
+ svga_set_default_seq_regs();
+ svga_set_default_crt_regs();
+ svga_wcrt_multi(vt8623_line_compare_regs, 0xFFFFFFFF);
+ svga_wcrt_multi(vt8623_start_address_regs, 0);
+
+ svga_wcrt_multi(vt8623_offset_regs, offset_value);
+ svga_wseq_multi(vt8623_fetch_count_regs, fetch_value);
+
+ if (info->var.vmode & FB_VMODE_DOUBLE)
+ svga_wcrt_mask(0x09, 0x80, 0x80);
+ else
+ svga_wcrt_mask(0x09, 0x00, 0x80);
+
+ svga_wseq_mask(0x1E, 0xF0, 0xF0); // DI/DVP bus
+ svga_wseq_mask(0x2A, 0x0F, 0x0F); // DI/DVP bus
+ svga_wseq_mask(0x16, 0x08, 0xBF); // FIFO read threshold
+ vga_wseq(NULL, 0x17, 0x1F); // FIFO depth
+ vga_wseq(NULL, 0x18, 0x4E);
+ svga_wseq_mask(0x1A, 0x08, 0x08); // enable MMIO ?
+
+ vga_wcrt(NULL, 0x32, 0x00);
+ vga_wcrt(NULL, 0x34, 0x00);
+ vga_wcrt(NULL, 0x6A, 0x80);
+ vga_wcrt(NULL, 0x6A, 0xC0);
+
+ vga_wgfx(NULL, 0x20, 0x00);
+ vga_wgfx(NULL, 0x21, 0x00);
+ vga_wgfx(NULL, 0x22, 0x00);
+
+ /* Set SR15 according to number of bits per pixel */
+ mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
+ switch (mode) {
+ case 0:
+ pr_debug("fb%d: text mode\n", info->node);
+ svga_set_textmode_vga_regs();
+ svga_wseq_mask(0x15, 0x00, 0xFE);
+ svga_wcrt_mask(0x11, 0x60, 0x70);
+ break;
+ case 1:
+ pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
+ vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
+ svga_wseq_mask(0x15, 0x20, 0xFE);
+ svga_wcrt_mask(0x11, 0x00, 0x70);
+ break;
+ case 2:
+ pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
+ svga_wseq_mask(0x15, 0x00, 0xFE);
+ svga_wcrt_mask(0x11, 0x00, 0x70);
+ break;
+ case 3:
+ pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
+ svga_wseq_mask(0x15, 0x22, 0xFE);
+ break;
+ case 4:
+ pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
+ svga_wseq_mask(0x15, 0xB6, 0xFE);
+ break;
+ case 5:
+ pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
+ svga_wseq_mask(0x15, 0xAE, 0xFE);
+ break;
+ default:
+ printk(KERN_ERR "vt8623fb: unsupported mode - bug\n");
+ return -EINVAL;
+ }
+
+ vt8623_set_pixclock(info, info->var.pixclock);
+ svga_set_timings(&vt8623_timing_regs, &(info->var), 1, 1,
+ (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
+ 1, info->node);
+
+ memset_io(info->screen_base, 0x00, screen_size);
+
+ /* Device and screen back on */
+ svga_wcrt_mask(0x17, 0x80, 0x80);
+ svga_wcrt_mask(0x36, 0x00, 0x30);
+ svga_wseq_mask(0x01, 0x00, 0x20);
+
+ return 0;
+}
+
+
+static int vt8623fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *fb)
+{
+ switch (fb->var.bits_per_pixel) {
+ case 0:
+ case 4:
+ if (regno >= 16)
+ return -EINVAL;
+
+ outb(0x0F, VGA_PEL_MSK);
+ outb(regno, VGA_PEL_IW);
+ outb(red >> 10, VGA_PEL_D);
+ outb(green >> 10, VGA_PEL_D);
+ outb(blue >> 10, VGA_PEL_D);
+ break;
+ case 8:
+ if (regno >= 256)
+ return -EINVAL;
+
+ outb(0xFF, VGA_PEL_MSK);
+ outb(regno, VGA_PEL_IW);
+ outb(red >> 10, VGA_PEL_D);
+ outb(green >> 10, VGA_PEL_D);
+ outb(blue >> 10, VGA_PEL_D);
+ break;
+ case 16:
+ if (regno >= 16)
+ return 0;
+
+ if (fb->var.green.length == 5)
+ ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
+ ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
+ else if (fb->var.green.length == 6)
+ ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
+ ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
+ else
+ return -EINVAL;
+ break;
+ case 24:
+ case 32:
+ if (regno >= 16)
+ return 0;
+
+ /* ((transp & 0xFF00) << 16) */
+ ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
+ (green & 0xFF00) | ((blue & 0xFF00) >> 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int vt8623fb_blank(int blank_mode, struct fb_info *info)
+{
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ pr_debug("fb%d: unblank\n", info->node);
+ svga_wcrt_mask(0x36, 0x00, 0x30);
+ svga_wseq_mask(0x01, 0x00, 0x20);
+ break;
+ case FB_BLANK_NORMAL:
+ pr_debug("fb%d: blank\n", info->node);
+ svga_wcrt_mask(0x36, 0x00, 0x30);
+ svga_wseq_mask(0x01, 0x20, 0x20);
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
+ svga_wcrt_mask(0x36, 0x10, 0x30);
+ svga_wseq_mask(0x01, 0x20, 0x20);
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
+ svga_wcrt_mask(0x36, 0x20, 0x30);
+ svga_wseq_mask(0x01, 0x20, 0x20);
+ break;
+ case FB_BLANK_POWERDOWN:
+ pr_debug("fb%d: DPMS off (no sync)\n", info->node);
+ svga_wcrt_mask(0x36, 0x30, 0x30);
+ svga_wseq_mask(0x01, 0x20, 0x20);
+ break;
+ }
+
+ return 0;
+}
+
+
+static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ unsigned int offset;
+
+ /* Calculate the offset */
+ if (var->bits_per_pixel == 0) {
+ offset = (var->yoffset / 16) * var->xres_virtual + var->xoffset;
+ offset = offset >> 3;
+ } else {
+ offset = (var->yoffset * info->fix.line_length) +
+ (var->xoffset * var->bits_per_pixel / 8);
+ offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 1);
+ }
+
+ /* Set the offset */
+ svga_wcrt_multi(vt8623_start_address_regs, offset);
+
+ return 0;
+}
+
+
+/* ------------------------------------------------------------------------- */
+
+
+/* Frame buffer operations */
+
+static struct fb_ops vt8623fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = vt8623fb_open,
+ .fb_release = vt8623fb_release,
+ .fb_check_var = vt8623fb_check_var,
+ .fb_set_par = vt8623fb_set_par,
+ .fb_setcolreg = vt8623fb_setcolreg,
+ .fb_blank = vt8623fb_blank,
+ .fb_pan_display = vt8623fb_pan_display,
+ .fb_fillrect = vt8623fb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = vt8623fb_imageblit,
+ .fb_get_caps = svga_get_caps,
+};
+
+
+/* PCI probe */
+
+static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct fb_info *info;
+ struct vt8623fb_info *par;
+ unsigned int memsize1, memsize2;
+ int rc;
+
+ /* Ignore secondary VGA device because there is no VGA arbitration */
+ if (! svga_primary_device(dev)) {
+ dev_info(&(dev->dev), "ignoring secondary device\n");
+ return -ENODEV;
+ }
+
+ /* Allocate and fill driver data structure */
+ info = framebuffer_alloc(sizeof(struct vt8623fb_info), NULL);
+ if (! info) {
+ dev_err(&(dev->dev), "cannot allocate memory\n");
+ return -ENOMEM;
+ }
+
+ par = info->par;
+ mutex_init(&par->open_lock);
+
+ info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
+ info->fbops = &vt8623fb_ops;
+
+ /* Prepare PCI device */
+
+ rc = pci_enable_device(dev);
+ if (rc < 0) {
+ dev_err(&(dev->dev), "cannot enable PCI device\n");
+ goto err_enable_device;
+ }
+
+ rc = pci_request_regions(dev, "vt8623fb");
+ if (rc < 0) {
+ dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
+ goto err_request_regions;
+ }
+
+ info->fix.smem_start = pci_resource_start(dev, 0);
+ info->fix.smem_len = pci_resource_len(dev, 0);
+ info->fix.mmio_start = pci_resource_start(dev, 1);
+ info->fix.mmio_len = pci_resource_len(dev, 1);
+
+ /* Map physical IO memory address into kernel space */
+ info->screen_base = pci_iomap(dev, 0, 0);
+ if (! info->screen_base) {
+ rc = -ENOMEM;
+ dev_err(&(dev->dev), "iomap for framebuffer failed\n");
+ goto err_iomap_1;
+ }
+
+ par->mmio_base = pci_iomap(dev, 1, 0);
+ if (! par->mmio_base) {
+ rc = -ENOMEM;
+ dev_err(&(dev->dev), "iomap for MMIO failed\n");
+ goto err_iomap_2;
+ }
+
+ /* Find out how much physical memory there is on the card */
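+ /* both registers should report the size in MB; trust them only
+  * if they agree and the value is sane (16..64 MB) */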
+ memsize1 = (vga_rseq(NULL, 0x34) + 1) >> 1;
+ memsize2 = vga_rseq(NULL, 0x39) << 2;
+
+ if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
+ info->screen_size = memsize1 << 20;
+ else {
+ dev_err(&(dev->dev), "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
+ info->screen_size = 16 << 20;
+ }
+
+ info->fix.smem_len = info->screen_size;
+ strcpy(info->fix.id, "VIA VT8623");
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ info->fix.ypanstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->pseudo_palette = (void*)par->pseudo_palette;
+
+ /* Prepare startup mode */
+
+ rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
+ if (! ((rc == 1) || (rc == 2))) {
+ rc = -EINVAL;
+ dev_err(&(dev->dev), "mode %s not found\n", mode);
+ goto err_find_mode;
+ }
+
+ rc = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (rc < 0) {
+ dev_err(&(dev->dev), "cannot allocate colormap\n");
+ goto err_alloc_cmap;
+ }
+
+ rc = register_framebuffer(info);
+ if (rc < 0) {
+ dev_err(&(dev->dev), "cannot register framebuffer\n");
+ goto err_reg_fb;
+ }
+
+ printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
+ pci_name(dev), info->fix.smem_len >> 20);
+
+ /* Record a reference to the driver data */
+ pci_set_drvdata(dev, info);
+
+#ifdef CONFIG_MTRR
+ if (mtrr) {
+ par->mtrr_reg = -1;
+ par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
+ }
+#endif
+
+ return 0;
+
+ /* Error handling */
+err_reg_fb:
+ fb_dealloc_cmap(&info->cmap);
+err_alloc_cmap:
+err_find_mode:
+ pci_iounmap(dev, par->mmio_base);
+err_iomap_2:
+ pci_iounmap(dev, info->screen_base);
+err_iomap_1:
+ pci_release_regions(dev);
+err_request_regions:
+/* pci_disable_device(dev); */
+err_enable_device:
+ framebuffer_release(info);
+ return rc;
+}
+
+/* PCI remove */
+
+static void __devexit vt8623_pci_remove(struct pci_dev *dev)
+{
+ struct fb_info *info = pci_get_drvdata(dev);
+
+ if (info) {
+ struct vt8623fb_info *par = info->par;
+
+#ifdef CONFIG_MTRR
+ if (par->mtrr_reg >= 0) {
+ mtrr_del(par->mtrr_reg, 0, 0);
+ par->mtrr_reg = -1;
+ }
+#endif
+
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+
+ pci_iounmap(dev, info->screen_base);
+ pci_iounmap(dev, par->mmio_base);
+ pci_release_regions(dev);
+/* pci_disable_device(dev); */
+
+ pci_set_drvdata(dev, NULL);
+ framebuffer_release(info);
+ }
+}
+
+
+#ifdef CONFIG_PM
+/* PCI suspend */
+
+static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
+{
+ struct fb_info *info = pci_get_drvdata(dev);
+ struct vt8623fb_info *par = info->par;
+
+ dev_info(&(dev->dev), "suspend\n");
+
+ acquire_console_sem();
+ mutex_lock(&(par->open_lock));
+
+ if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
+ mutex_unlock(&(par->open_lock));
+ release_console_sem();
+ return 0;
+ }
+
+ fb_set_suspend(info, 1);
+
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+
+ mutex_unlock(&(par->open_lock));
+ release_console_sem();
+
+ return 0;
+}
+
+
+/* PCI resume */
+
+static int vt8623_pci_resume(struct pci_dev* dev)
+{
+ struct fb_info *info = pci_get_drvdata(dev);
+ struct vt8623fb_info *par = info->par;
+
+ dev_info(&(dev->dev), "resume\n");
+
+ acquire_console_sem();
+ mutex_lock(&(par->open_lock));
+
+ if (par->ref_count == 0) {
+ mutex_unlock(&(par->open_lock));
+ release_console_sem();
+ return 0;
+ }
+
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+
+ if (pci_enable_device(dev))
+ goto fail;
+
+ pci_set_master(dev);
+
+ vt8623fb_set_par(info);
+ fb_set_suspend(info, 0);
+
+fail:
+ mutex_unlock(&(par->open_lock));
+ release_console_sem();
+
+ return 0;
+}
+#else
+#define vt8623_pci_suspend NULL
+#define vt8623_pci_resume NULL
+#endif /* CONFIG_PM */
+
+/* List of boards that we are trying to support */
+
+static struct pci_device_id vt8623_devices[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3122)},
+ {0, 0, 0, 0, 0, 0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, vt8623_devices);
+
+static struct pci_driver vt8623fb_pci_driver = {
+ .name = "vt8623fb",
+ .id_table = vt8623_devices,
+ .probe = vt8623_pci_probe,
+ .remove = __devexit_p(vt8623_pci_remove),
+ .suspend = vt8623_pci_suspend,
+ .resume = vt8623_pci_resume,
+};
+
+/* Cleanup */
+
+static void __exit vt8623fb_cleanup(void)
+{
+ pr_debug("vt8623fb: cleaning up\n");
+ pci_unregister_driver(&vt8623fb_pci_driver);
+}
+
+/* Driver Initialisation */
+
+static int __init vt8623fb_init(void)
+{
+
+#ifndef MODULE
+ char *option = NULL;
+
+ if (fb_get_options("vt8623fb", &option))
+ return -ENODEV;
+
+ if (option && *option)
+ mode = option;
+#endif
+
+ pr_debug("vt8623fb: initializing\n");
+ return pci_register_driver(&vt8623fb_pci_driver);
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* Modularization */
+
+module_init(vt8623fb_init);
+module_exit(vt8623fb_cleanup);
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index c287a9ae4fd..ca75b3ad3a2 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -1,4 +1,5 @@
menu "Dallas's 1-wire bus"
+ depends on HAS_IOMEM
config W1
tristate "Dallas's 1-wire support"