author    Jeff Garzik <jgarzik@pobox.com>    2005-10-28 12:31:34 -0400
committer Jeff Garzik <jgarzik@pobox.com>    2005-10-28 12:31:34 -0400
commit    96b88fb850cc419171f926ad04650ec509e9f543 (patch)
tree      5bf9537bde467534608b3acdbfa5f7726ede8c3f
parent    e78a57de94480226f7fc90d0b4837bfc6c99a9e0 (diff)
parent    5fadd053d9bb4345ec6f405d24db4e7eb49cf81e (diff)
Merge branch 'master'
-rw-r--r--Documentation/block/biodoc.txt113
-rw-r--r--Documentation/kernel-parameters.txt496
-rw-r--r--Documentation/networking/bonding.txt5
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/kernel/pci-noop.c2
-rw-r--r--arch/alpha/kernel/pci_iommu.c2
-rw-r--r--arch/arm/mach-integrator/impd1.c15
-rw-r--r--arch/arm/mach-pxa/corgi_lcd.c2
-rw-r--r--arch/arm/mach-pxa/generic.c20
-rw-r--r--arch/arm/mach-s3c2410/mach-bast.c6
-rw-r--r--arch/arm/mm/consistent.c8
-rw-r--r--arch/frv/mb93090-mb00/pci-dma-nommu.c2
-rw-r--r--arch/frv/mb93090-mb00/pci-dma.c2
-rw-r--r--arch/frv/mm/dma-alloc.c2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.c30
-rw-r--r--arch/ia64/hp/common/hwsw_iommu.c2
-rw-r--r--arch/ia64/hp/common/sba_iommu.c2
-rw-r--r--arch/ia64/lib/swiotlb.c2
-rw-r--r--arch/ia64/sn/kernel/xpc.h2
-rw-r--r--arch/ia64/sn/pci/pci_dma.c2
-rw-r--r--arch/mips/mm/dma-coherent.c4
-rw-r--r--arch/mips/mm/dma-ip27.c4
-rw-r--r--arch/mips/mm/dma-ip32.c4
-rw-r--r--arch/mips/mm/dma-noncoherent.c4
-rw-r--r--arch/parisc/kernel/pci-dma.c6
-rw-r--r--arch/ppc/8xx_io/cs4218.h2
-rw-r--r--arch/ppc/8xx_io/cs4218_tdm.c4
-rw-r--r--arch/ppc/kernel/dma-mapping.c4
-rw-r--r--arch/ppc/mm/pgtable.c4
-rw-r--r--arch/ppc64/kernel/iSeries_htab.c2
-rw-r--r--arch/ppc64/kernel/mpic.c4
-rw-r--r--arch/ppc64/kernel/time.c2
-rw-r--r--arch/ppc64/mm/init.c3
-rw-r--r--arch/sh/boards/renesas/rts7751r2d/mach.c2
-rw-r--r--arch/sh/cchips/voyagergx/consistent.c2
-rw-r--r--arch/sh/drivers/pci/dma-dreamcast.c2
-rw-r--r--arch/sh/mm/consistent.c2
-rw-r--r--arch/sparc64/solaris/socksys.c2
-rw-r--r--arch/sparc64/solaris/timod.c2
-rw-r--r--arch/um/include/sysdep-i386/thread.h2
-rw-r--r--arch/um/include/sysdep-x86_64/thread.h2
-rw-r--r--arch/um/kernel/mem.c2
-rw-r--r--arch/um/kernel/process_kern.c2
-rw-r--r--arch/x86_64/kernel/pci-gart.c4
-rw-r--r--arch/x86_64/kernel/pci-nommu.c2
-rw-r--r--arch/xtensa/kernel/pci-dma.c2
-rw-r--r--drivers/block/as-iosched.c327
-rw-r--r--drivers/block/cfq-iosched.c372
-rw-r--r--drivers/block/deadline-iosched.c125
-rw-r--r--drivers/block/elevator.c345
-rw-r--r--drivers/block/ll_rw_blk.c193
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/noop-iosched.c48
-rw-r--r--drivers/block/rd.c2
-rw-r--r--drivers/char/drm/drm_vm.c3
-rw-r--r--drivers/char/drm/mga_drv.h2
-rw-r--r--drivers/char/drm/mga_state.c2
-rw-r--r--drivers/char/drm/radeon_cp.c11
-rw-r--r--drivers/char/n_tty.c2
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c6
-rw-r--r--drivers/ieee1394/eth1394.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c21
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.h2
-rw-r--r--drivers/md/bitmap.c2
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/md.c10
-rw-r--r--drivers/media/video/Kconfig1
-rw-r--r--drivers/message/fusion/mptsas.c12
-rw-r--r--drivers/net/8139cp.c5
-rw-r--r--drivers/net/8139too.c5
-rw-r--r--drivers/net/Kconfig23
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/au1000_eth.c13
-rw-r--r--drivers/net/b44.c136
-rw-r--r--drivers/net/b44.h2
-rw-r--r--drivers/net/bonding/bond_main.c57
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/declance.c37
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000.h74
-rw-r--r--drivers/net/e1000/e1000_ethtool.c95
-rw-r--r--drivers/net/e1000/e1000_hw.c220
-rw-r--r--drivers/net/e1000/e1000_hw.h96
-rw-r--r--drivers/net/e1000/e1000_main.c1078
-rw-r--r--drivers/net/e1000/e1000_param.c10
-rw-r--r--drivers/net/epic100.c4
-rw-r--r--drivers/net/forcedeth.c310
-rw-r--r--drivers/net/gianfar.c412
-rw-r--r--drivers/net/gianfar.h30
-rw-r--r--drivers/net/gianfar_ethtool.c100
-rw-r--r--drivers/net/gianfar_mii.c219
-rw-r--r--drivers/net/gianfar_mii.h45
-rw-r--r--drivers/net/gianfar_phy.c661
-rw-r--r--drivers/net/gianfar_phy.h213
-rw-r--r--drivers/net/hamradio/Kconfig1
-rw-r--r--drivers/net/hamradio/bpqether.c9
-rw-r--r--drivers/net/hamradio/mkiss.c182
-rw-r--r--drivers/net/hamradio/mkiss.h62
-rw-r--r--drivers/net/hp100.c48
-rw-r--r--drivers/net/irda/stir4200.c7
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c8
-rw-r--r--drivers/net/ixgb/ixgb_main.c3
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/lne390.c2
-rw-r--r--drivers/net/mii.c15
-rw-r--r--drivers/net/mipsnet.c371
-rw-r--r--drivers/net/mipsnet.h127
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/myri_sbus.h2
-rw-r--r--drivers/net/ne.c15
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/ns83820.c3
-rw-r--r--drivers/net/pcnet32.c278
-rw-r--r--drivers/net/phy/Kconfig8
-rw-r--r--drivers/net/phy/phy.c8
-rw-r--r--drivers/net/phy/phy_device.c3
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/rionet.c574
-rw-r--r--drivers/net/s2io-regs.h11
-rw-r--r--drivers/net/s2io.c791
-rw-r--r--drivers/net/s2io.h50
-rw-r--r--drivers/net/sb1250-mac.c1384
-rw-r--r--drivers/net/sgiseeq.c37
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/sunbmac.c3
-rw-r--r--drivers/net/sunbmac.h2
-rw-r--r--drivers/net/sundance.c49
-rw-r--r--drivers/net/tokenring/ibmtr.c9
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/tokenring/tms380tr.c3
-rw-r--r--drivers/net/tulip/de2104x.c5
-rw-r--r--drivers/net/typhoon.c7
-rw-r--r--drivers/net/via-rhine.c38
-rw-r--r--drivers/net/wan/cosa.c6
-rw-r--r--drivers/net/wan/cycx_drv.c7
-rw-r--r--drivers/net/wan/cycx_main.c2
-rw-r--r--drivers/net/wan/cycx_x25.c5
-rw-r--r--drivers/net/wan/dscc4.c23
-rw-r--r--drivers/net/wan/farsync.c27
-rw-r--r--drivers/net/wan/hdlc_fr.c2
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c10
-rw-r--r--drivers/net/wan/lmc/lmc_media.c8
-rw-r--r--drivers/net/wan/pc300.h16
-rw-r--r--drivers/net/wan/pc300_drv.c87
-rw-r--r--drivers/net/wan/pc300_tty.c18
-rw-r--r--drivers/net/wan/sdla.c20
-rw-r--r--drivers/net/wan/sdla_fr.c4
-rw-r--r--drivers/net/wan/sdla_x25.c8
-rw-r--r--drivers/net/wan/sdladrv.c16
-rw-r--r--drivers/net/wan/syncppp.c10
-rw-r--r--drivers/net/wireless/airo.c37
-rw-r--r--drivers/net/wireless/airport.c19
-rw-r--r--drivers/net/wireless/atmel.c24
-rw-r--r--drivers/net/wireless/hermes.c11
-rw-r--r--drivers/net/wireless/hermes.h111
-rw-r--r--drivers/net/wireless/hostap/hostap.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c43
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c28
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c80
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.h6
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c50
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c22
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c23
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c21
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c11
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h2
-rw-r--r--drivers/net/wireless/ipw2100.c24
-rw-r--r--drivers/net/wireless/ipw2100.h2
-rw-r--r--drivers/net/wireless/ipw2200.c27
-rw-r--r--drivers/net/wireless/ipw2200.h4
-rw-r--r--drivers/net/wireless/netwave_cs.c185
-rw-r--r--drivers/net/wireless/orinoco.c235
-rw-r--r--drivers/net/wireless/orinoco.h16
-rw-r--r--drivers/net/wireless/orinoco_cs.c110
-rw-r--r--drivers/net/wireless/orinoco_nortel.c20
-rw-r--r--drivers/net/wireless/orinoco_pci.c18
-rw-r--r--drivers/net/wireless/orinoco_plx.c18
-rw-r--r--drivers/net/wireless/orinoco_tmd.c18
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c5
-rw-r--r--drivers/net/wireless/ray_cs.c46
-rw-r--r--drivers/net/wireless/spectrum_cs.c79
-rw-r--r--drivers/net/wireless/wavelan.c8
-rw-r--r--drivers/net/wireless/wavelan.p.h4
-rw-r--r--drivers/net/wireless/wavelan_cs.c8
-rw-r--r--drivers/net/wireless/wavelan_cs.p.h4
-rw-r--r--drivers/net/wireless/wl3501.h2
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/pci/quirks.c101
-rw-r--r--drivers/pci/setup-bus.c2
-rw-r--r--drivers/s390/net/fsm.c2
-rw-r--r--drivers/s390/net/fsm.h2
-rw-r--r--drivers/s390/net/qeth.h45
-rw-r--r--drivers/s390/net/qeth_fs.h12
-rw-r--r--drivers/s390/net/qeth_main.c419
-rw-r--r--drivers/s390/net/qeth_mpc.c6
-rw-r--r--drivers/s390/net/qeth_mpc.h15
-rw-r--r--drivers/s390/net/qeth_sys.c28
-rw-r--r--drivers/scsi/NCR5380.c2
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/ahci.c2
-rw-r--r--drivers/scsi/ata_piix.c4
-rw-r--r--drivers/scsi/eata.c2
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/libata-core.c106
-rw-r--r--drivers/scsi/libata-scsi.c24
-rw-r--r--drivers/scsi/libata.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/osst.c6
-rw-r--r--drivers/scsi/pdc_adma.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_rscn.c2
-rw-r--r--drivers/scsi/sata_mv.c2
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_promise.c12
-rw-r--r--drivers/scsi/sata_qstor.c2
-rw-r--r--drivers/scsi/sata_sil.c2
-rw-r--r--drivers/scsi/sata_sil24.c2
-rw-r--r--drivers/scsi/sata_sis.c2
-rw-r--r--drivers/scsi/sata_svw.c4
-rw-r--r--drivers/scsi/sata_sx4.c10
-rw-r--r--drivers/scsi/sata_uli.c2
-rw-r--r--drivers/scsi/sata_via.c2
-rw-r--r--drivers/scsi/sata_vsc.c4
-rw-r--r--drivers/scsi/scsi.c8
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_ioctl.c3
-rw-r--r--drivers/scsi/scsi_lib.c9
-rw-r--r--drivers/scsi/scsi_transport_fc.c13
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/serial/8250_pci.c26
-rw-r--r--drivers/usb/core/buffer.c2
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hcd.h8
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/urb.c4
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/gadget/dummy_hcd.c8
-rw-r--r--drivers/usb/gadget/ether.c22
-rw-r--r--drivers/usb/gadget/goku_udc.c6
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.c12
-rw-r--r--drivers/usb/gadget/net2280.c6
-rw-r--r--drivers/usb/gadget/omap_udc.c6
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c6
-rw-r--r--drivers/usb/gadget/serial.c16
-rw-r--r--drivers/usb/gadget/zero.c8
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mem.c6
-rw-r--r--drivers/usb/host/ehci-q.c6
-rw-r--r--drivers/usb/host/ehci-sched.c14
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c2
-rw-r--r--drivers/usb/host/ohci-mem.c4
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/misc/uss720.c6
-rw-r--r--drivers/usb/net/asix.c2
-rw-r--r--drivers/usb/net/gl620a.c2
-rw-r--r--drivers/usb/net/kaweth.c6
-rw-r--r--drivers/usb/net/net1080.c2
-rw-r--r--drivers/usb/net/rndis_host.c2
-rw-r--r--drivers/usb/net/usbnet.c2
-rw-r--r--drivers/usb/net/usbnet.h2
-rw-r--r--drivers/usb/net/zaurus.c2
-rw-r--r--drivers/usb/net/zd1201.c2
-rw-r--r--drivers/video/fbsysfs.c8
-rw-r--r--fs/afs/file.c4
-rw-r--r--fs/aio.c6
-rw-r--r--fs/bio.c4
-rw-r--r--fs/buffer.c4
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/dquot.c2
-rw-r--r--fs/exec.c12
-rw-r--r--fs/ext3/inode.c2
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/inode.c2
-rw-r--r--fs/hfsplus/super.c1
-rw-r--r--fs/inode.c2
-rw-r--r--fs/inotify.c1
-rw-r--r--fs/jbd/journal.c2
-rw-r--r--fs/jbd/transaction.c2
-rw-r--r--fs/jfs/jfs_metapage.c4
-rw-r--r--fs/lockd/host.c4
-rw-r--r--fs/locks.c48
-rw-r--r--fs/mbcache.c6
-rw-r--r--fs/namei.c95
-rw-r--r--fs/nfs/delegation.c2
-rw-r--r--fs/nfs/delegation.h16
-rw-r--r--fs/nfs/dir.c67
-rw-r--r--fs/nfs/file.c31
-rw-r--r--fs/nfs/inode.c200
-rw-r--r--fs/nfs/nfs2xdr.c1
-rw-r--r--fs/nfs/nfs3proc.c92
-rw-r--r--fs/nfs/nfs3xdr.c1
-rw-r--r--fs/nfs/nfs4_fs.h53
-rw-r--r--fs/nfs/nfs4proc.c735
-rw-r--r--fs/nfs/nfs4state.c181
-rw-r--r--fs/nfs/nfs4xdr.c305
-rw-r--r--fs/nfs/proc.c44
-rw-r--r--fs/nfs/read.c1
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/open.c79
-rw-r--r--fs/partitions/check.c2
-rw-r--r--fs/reiserfs/fix_node.c2
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/xfs/linux-2.6/kmem.c22
-rw-r--r--fs/xfs/linux-2.6/kmem.h18
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c8
-rw-r--r--include/asm-alpha/atomic.h14
-rw-r--r--include/asm-alpha/barrier.h34
-rw-r--r--include/asm-alpha/dma-mapping.h2
-rw-r--r--include/asm-alpha/system.h31
-rw-r--r--include/asm-arm/arch-s3c2410/regs-clock.h21
-rw-r--r--include/asm-arm/bitops.h1
-rw-r--r--include/asm-arm/dma-mapping.h4
-rw-r--r--include/asm-cris/dma-mapping.h4
-rw-r--r--include/asm-frv/dma-mapping.h2
-rw-r--r--include/asm-frv/pci.h2
-rw-r--r--include/asm-generic/dma-mapping-broken.h2
-rw-r--r--include/asm-ia64/machvec.h2
-rw-r--r--include/asm-m32r/dma-mapping.h2
-rw-r--r--include/asm-mips/dma-mapping.h4
-rw-r--r--include/asm-mips/sgi/hpc3.h40
-rw-r--r--include/asm-parisc/dma-mapping.h8
-rw-r--r--include/asm-ppc/dma-mapping.h2
-rw-r--r--include/asm-sh/dma-mapping.h4
-rw-r--r--include/asm-sh/machvec.h2
-rw-r--r--include/asm-sh64/dma-mapping.h2
-rw-r--r--include/asm-sparc/dma-mapping.h2
-rw-r--r--include/asm-sparc64/dma-mapping.h2
-rw-r--r--include/asm-um/dma-mapping.h2
-rw-r--r--include/asm-um/page.h2
-rw-r--r--include/asm-x86_64/dma-mapping.h2
-rw-r--r--include/asm-x86_64/pci.h6
-rw-r--r--include/asm-x86_64/swiotlb.h2
-rw-r--r--include/asm-xtensa/dma-mapping.h2
-rw-r--r--include/linux/ata.h6
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/blkdev.h50
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/cyclomx.h2
-rw-r--r--include/linux/cycx_drv.h1
-rw-r--r--include/linux/elevator.h22
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/gfp.h39
-rw-r--r--include/linux/hugetlb.h16
-rw-r--r--include/linux/i2o.h4
-rw-r--r--include/linux/ibmtr.h4
-rw-r--r--include/linux/idr.h3
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/jbd.h4
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/libata.h34
-rw-r--r--include/linux/loop.h2
-rw-r--r--include/linux/mbcache.h2
-rw-r--r--include/linux/mii.h1
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/namei.h8
-rw-r--r--include/linux/netdevice.h6
-rw-r--r--include/linux/nfs_fs.h28
-rw-r--r--include/linux/nfs_xdr.h65
-rw-r--r--include/linux/pagemap.h13
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/radix-tree.h2
-rw-r--r--include/linux/reiserfs_fs.h2
-rw-r--r--include/linux/sdladrv.h4
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/sunrpc/auth.h7
-rw-r--r--include/linux/sunrpc/debug.h3
-rw-r--r--include/linux/sunrpc/gss_api.h27
-rw-r--r--include/linux/sunrpc/gss_err.h10
-rw-r--r--include/linux/sunrpc/gss_krb5.h27
-rw-r--r--include/linux/sunrpc/gss_spkm3.h4
-rw-r--r--include/linux/sunrpc/msg_prot.h25
-rw-r--r--include/linux/sunrpc/xdr.h6
-rw-r--r--include/linux/sunrpc/xprt.h227
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/textsearch.h4
-rw-r--r--include/linux/types.h9
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb_gadget.h12
-rw-r--r--include/linux/wanpipe.h9
-rw-r--r--include/net/ax25.h2
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/ieee80211.h525
-rw-r--r--include/net/ieee80211_crypt.h38
-rw-r--r--include/net/ieee80211_radiotap.h231
-rw-r--r--include/net/llc_pdu.h4
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/syncppp.h1
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_request.h2
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--kernel/audit.c6
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/kexec.c7
-rw-r--r--kernel/posix-cpu-timers.c110
-rw-r--r--kernel/posix-timers.c2
-rw-r--r--kernel/power/swsusp.c2
-rw-r--r--kernel/sched.c1
-rw-r--r--kernel/signal.c14
-rw-r--r--lib/idr.c15
-rw-r--r--lib/kobject.c2
-rw-r--r--lib/kobject_uevent.c4
-rw-r--r--lib/textsearch.c2
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/highmem.c14
-rw-r--r--mm/hugetlb.c22
-rw-r--r--mm/memory.c14
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/mempool.c2
-rw-r--r--mm/page_alloc.c35
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab.c8
-rw-r--r--mm/vmscan.c8
-rw-r--r--net/802/tr.c5
-rw-r--r--net/core/neighbour.c53
-rw-r--r--net/core/pktgen.c506
-rw-r--r--net/core/skbuff.c8
-rw-r--r--net/core/sock.c2
-rw-r--r--net/core/wireless.c9
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/output.c12
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/decnet/af_decnet.c13
-rw-r--r--net/ieee80211/Makefile3
-rw-r--r--net/ieee80211/ieee80211_crypt.c59
-rw-r--r--net/ieee80211/ieee80211_crypt_ccmp.c75
-rw-r--r--net/ieee80211/ieee80211_crypt_tkip.c150
-rw-r--r--net/ieee80211/ieee80211_crypt_wep.c26
-rw-r--r--net/ieee80211/ieee80211_geo.c141
-rw-r--r--net/ieee80211/ieee80211_module.c65
-rw-r--r--net/ieee80211/ieee80211_rx.c610
-rw-r--r--net/ieee80211/ieee80211_tx.c321
-rw-r--r--net/ieee80211/ieee80211_wx.c372
-rw-r--r--net/ipv4/devinet.c3
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/netfilter/ip_conntrack_core.c132
-rw-r--r--net/ipv4/proc.c4
-rw-r--r--net/ipv4/tcp_input.c1
-rw-r--r--net/ipv4/tcp_output.c12
-rw-r--r--net/ipv6/icmp.c9
-rw-r--r--net/ipv6/ip6_flowlabel.c2
-rw-r--r--net/ipv6/proc.c4
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/rose/rose_route.c2
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sunrpc/Makefile2
-rw-r--r--net/sunrpc/auth.c1
-rw-r--r--net/sunrpc/auth_gss/Makefile2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c187
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c260
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c41
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c44
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_unseal.c39
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c363
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c29
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c21
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_seal.c4
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_unseal.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c9
-rw-r--r--net/sunrpc/auth_null.c2
-rw-r--r--net/sunrpc/auth_unix.c2
-rw-r--r--net/sunrpc/clnt.c147
-rw-r--r--net/sunrpc/pmap_clnt.c12
-rw-r--r--net/sunrpc/rpc_pipe.c29
-rw-r--r--net/sunrpc/socklib.c175
-rw-r--r--net/sunrpc/sunrpc_syms.c1
-rw-r--r--net/sunrpc/svcsock.c5
-rw-r--r--net/sunrpc/sysctl.c32
-rw-r--r--net/sunrpc/xdr.c177
-rw-r--r--net/sunrpc/xprt.c1613
-rw-r--r--net/sunrpc/xprtsock.c1252
-rw-r--r--net/xfrm/xfrm_policy.c43
-rw-r--r--net/xfrm/xfrm_state.c6
-rw-r--r--security/dummy.c2
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/policydb.c6
-rw-r--r--sound/core/init.c6
-rw-r--r--sound/core/memalloc.c4
-rw-r--r--sound/core/seq/instr/ainstr_gf1.c5
-rw-r--r--sound/core/seq/instr/ainstr_iw.c4
-rw-r--r--sound/core/seq/instr/ainstr_simple.c3
-rw-r--r--sound/oss/dmasound/dmasound.h2
-rw-r--r--sound/oss/dmasound/dmasound_atari.c4
-rw-r--r--sound/oss/dmasound/dmasound_awacs.c4
-rw-r--r--sound/oss/dmasound/dmasound_paula.c4
-rw-r--r--sound/oss/dmasound/dmasound_q40.c4
-rw-r--r--sound/usb/usbmidi.c2
510 files changed, 14796 insertions, 9494 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6dd274d7e1c..2d65c218216 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -906,9 +906,20 @@ Aside:
4. The I/O scheduler
-I/O schedulers are now per queue. They should be runtime switchable and modular
-but aren't yet. Jens has most bits to do this, but the sysfs implementation is
-missing.
+The I/O scheduler, a.k.a. elevator, is implemented in two layers: the generic
+dispatch queue and the specific I/O schedulers. Unless stated otherwise,
+"elevator" refers to both parts and "I/O scheduler" to the specific schedulers.
+
+The block layer implements the generic dispatch queue in ll_rw_blk.c and
+elevator.c.
+The generic dispatch queue is responsible for properly ordering barrier
+requests, requeueing, handling non-fs requests and all other subtleties.
+
+Specific I/O schedulers are responsible for ordering normal filesystem
+requests. They can also choose to delay certain requests, for example to
+improve throughput. As the plural form indicates, there are multiple I/O
+schedulers. They can be built as modules, but at least one should be built
+into the kernel. Each queue can choose a different one and can also switch
+to another one dynamically.
A block layer call to the i/o scheduler follows the convention elv_xxx(). This
calls elevator_xxx_fn in the elevator switch (drivers/block/elevator.c). Oh,
@@ -921,44 +932,36 @@ keeping work.
The functions an elevator may implement are: (* are mandatory)
elevator_merge_fn called to query requests for merge with a bio
-elevator_merge_req_fn " " " with another request
+elevator_merge_req_fn called when two requests get merged. The one
+ which gets merged into the other one will never
+ be seen by the I/O scheduler again. IOW, after
+ being merged, the request is gone.
elevator_merged_fn called when a request in the scheduler has been
involved in a merge. It is used in the deadline
scheduler for example, to reposition the request
if its sorting order has changed.
-*elevator_next_req_fn returns the next scheduled request, or NULL
- if there are none (or none are ready).
+elevator_dispatch_fn fills the dispatch queue with ready requests.
+ I/O schedulers are free to postpone requests by
+ not filling the dispatch queue unless @force
+ is non-zero. Once dispatched, I/O schedulers
+ are not allowed to manipulate the requests -
+ they belong to the generic dispatch queue.
-*elevator_add_req_fn called to add a new request into the scheduler
+elevator_add_req_fn called to add a new request into the scheduler
elevator_queue_empty_fn returns true if the merge queue is empty.
Drivers shouldn't use this, but rather check
if elv_next_request is NULL (without losing the
request if one exists!)
-elevator_remove_req_fn This is called when a driver claims ownership of
- the target request - it now belongs to the
- driver. It must not be modified or merged.
- Drivers must not lose the request! A subsequent
- call of elevator_next_req_fn must return the
- _next_ request.
-
-elevator_requeue_req_fn called to add a request to the scheduler. This
- is used when the request has already been
- returned by elv_next_request, but hasn't
- completed. If this is not implemented then
- elevator_add_req_fn is called instead.
-
elevator_former_req_fn
elevator_latter_req_fn These return the request before or after the
one specified in disk sort order. Used by the
block layer to find merge possibilities.
-elevator_completed_req_fn called when a request is completed. This might
- come about due to being merged with another or
- when the device completes the request.
+elevator_completed_req_fn called when a request is completed.
elevator_may_queue_fn returns true if the scheduler wants to allow the
current context to queue a new request even if
@@ -967,13 +970,33 @@ elevator_may_queue_fn returns true if the scheduler wants to allow the
elevator_set_req_fn
elevator_put_req_fn Must be used to allocate and free any elevator
- specific storate for a request.
+ specific storage for a request.
+
+elevator_activate_req_fn Called when device driver first sees a request.
+ I/O schedulers can use this callback to
+ determine when actual execution of a request
+ starts.
+elevator_deactivate_req_fn Called when device driver decides to delay
+ a request by requeueing it.
elevator_init_fn
elevator_exit_fn Allocate and free any elevator specific storage
for a queue.
-4.2 I/O scheduler implementation
+4.2 Request flows seen by I/O schedulers
+All requests seen by I/O schedulers strictly follow one of the following three
+flows.
+
+ set_req_fn ->
+
+ i. add_req_fn -> (merged_fn ->)* -> dispatch_fn -> activate_req_fn ->
+ (deactivate_req_fn -> activate_req_fn ->)* -> completed_req_fn
+ ii. add_req_fn -> (merged_fn ->)* -> merge_req_fn
+ iii. [none]
+
+ -> put_req_fn
+
+4.3 I/O scheduler implementation
The generic i/o scheduler algorithm attempts to sort/merge/batch requests for
optimal disk scan and request servicing performance (based on generic
principles and device capabilities), optimized for:
@@ -993,18 +1016,7 @@ request in sort order to prevent binary tree lookups.
This arrangement is not a generic block layer characteristic however, so
elevators may implement queues as they please.
-ii. Last merge hint
-The last merge hint is part of the generic queue layer. I/O schedulers must do
-some management on it. For the most part, the most important thing is to make
-sure q->last_merge is cleared (set to NULL) when the request on it is no longer
-a candidate for merging (for example if it has been sent to the driver).
-
-The last merge performed is cached as a hint for the subsequent request. If
-sequential data is being submitted, the hint is used to perform merges without
-any scanning. This is not sufficient when there are multiple processes doing
-I/O though, so a "merge hash" is used by some schedulers.
-
-iii. Merge hash
+ii. Merge hash
AS and deadline use a hash table indexed by the last sector of a request. This
enables merging code to quickly look up "back merge" candidates, even when
multiple I/O streams are being performed at once on one disk.
@@ -1013,29 +1025,8 @@ multiple I/O streams are being performed at once on one disk.
are far less common than "back merges" due to the nature of most I/O patterns.
Front merges are handled by the binary trees in AS and deadline schedulers.
-iv. Handling barrier cases
-A request with flags REQ_HARDBARRIER or REQ_SOFTBARRIER must not be ordered
-around. That is, they must be processed after all older requests, and before
-any newer ones. This includes merges!
-
-In AS and deadline schedulers, barriers have the effect of flushing the reorder
-queue. The performance cost of this will vary from nothing to a lot depending
-on i/o patterns and device characteristics. Obviously they won't improve
-performance, so their use should be kept to a minimum.
-
-v. Handling insertion position directives
-A request may be inserted with a position directive. The directives are one of
-ELEVATOR_INSERT_BACK, ELEVATOR_INSERT_FRONT, ELEVATOR_INSERT_SORT.
-
-ELEVATOR_INSERT_SORT is a general directive for non-barrier requests.
-ELEVATOR_INSERT_BACK is used to insert a barrier to the back of the queue.
-ELEVATOR_INSERT_FRONT is used to insert a barrier to the front of the queue, and
-overrides the ordering requested by any previous barriers. In practice this is
-harmless and required, because it is used for SCSI requeueing. This does not
-require flushing the reorder queue, so does not impose a performance penalty.
-
-vi. Plugging the queue to batch requests in anticipation of opportunities for
- merge/sort optimizations
+iii. Plugging the queue to batch requests in anticipation of opportunities for
+ merge/sort optimizations
This is just the same as in 2.4 so far, though per-device unplugging
support is anticipated for 2.5. Also with a priority-based i/o scheduler,
@@ -1069,7 +1060,7 @@ Aside:
blk_kick_queue() to unplug a specific queue (right away ?)
or optionally, all queues, is in the plan.
-4.3 I/O contexts
+4.4 I/O contexts
I/O contexts provide a dynamically allocated per process data area. They may
be used in I/O schedulers, and in the block layer (could be used for IO stats,
priorities for example). See *io_context in drivers/block/ll_rw_blk.c, and
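
Aside: to make the callback list above concrete, here is a minimal sketch of how
a trivial I/O scheduler could register itself against the reworked elevator API
this document describes. It is illustrative only: the scheduler ("sketch") and
its functions are hypothetical, and the struct fields and helpers used
(struct elevator_type, elv_register(), elv_unregister(), elv_dispatch_add_tail())
are written from the 2.6.14-era interface as described above -- check
include/linux/elevator.h in your tree for the exact signatures.

    #include <linux/blkdev.h>
    #include <linux/elevator.h>
    #include <linux/init.h>
    #include <linux/module.h>

    /*
     * add_req_fn: a real scheduler would queue the request in its own data
     * structures here; this sketch pushes it straight to the dispatch queue.
     */
    static void sketch_add_request(request_queue_t *q, struct request *rq)
    {
    	elv_dispatch_add_tail(q, rq);
    }

    /* dispatch_fn: nothing is ever held back, so there is nothing to move. */
    static int sketch_dispatch(request_queue_t *q, int force)
    {
    	return 0;
    }

    static struct elevator_type elevator_sketch = {
    	.ops = {
    		.elevator_dispatch_fn	= sketch_dispatch,
    		.elevator_add_req_fn	= sketch_add_request,
    	},
    	.elevator_name	= "sketch",
    	.elevator_owner	= THIS_MODULE,
    };

    static int __init sketch_init(void)
    {
    	return elv_register(&elevator_sketch);
    }

    static void __exit sketch_exit(void)
    {
    	elv_unregister(&elevator_sketch);
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");

Such a scheduler never reorders anything; the generic dispatch queue still takes
care of barriers, requeueing and insertion directives, which is exactly the
split of responsibilities described above.
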
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7086f0a90d1..971589a9752 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -17,7 +17,7 @@ are specified on the kernel command line with the module name plus
usbcore.blinkenlights=1
-The text in square brackets at the beginning of the description state the
+The text in square brackets at the beginning of the description states the
restrictions on the kernel for the said kernel parameter to be valid. The
restrictions referred to are that the relevant option is valid if:
@@ -27,8 +27,8 @@ restrictions referred to are that the relevant option is valid if:
APM Advanced Power Management support is enabled.
AX25 Appropriate AX.25 support is enabled.
CD Appropriate CD support is enabled.
- DEVFS devfs support is enabled.
- DRM Direct Rendering Management support is enabled.
+ DEVFS devfs support is enabled.
+ DRM Direct Rendering Management support is enabled.
EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
EFI EFI Partitioning (GPT) is enabled
EIDE EIDE/ATAPI support is enabled.
@@ -71,7 +71,7 @@ restrictions referred to are that the relevant option is valid if:
SERIAL Serial support is enabled.
SMP The kernel is an SMP kernel.
SPARC Sparc architecture is enabled.
- SWSUSP Software suspension is enabled.
+ SWSUSP Software suspend is enabled.
TS Appropriate touchscreen support is enabled.
USB USB support is enabled.
USBHID USB Human Interface Device support is enabled.
@@ -105,13 +105,13 @@ running once the system is up.
See header of drivers/scsi/53c7xx.c.
See also Documentation/scsi/ncr53c7xx.txt.
- acpi= [HW,ACPI] Advanced Configuration and Power Interface
- Format: { force | off | ht | strict }
+ acpi= [HW,ACPI] Advanced Configuration and Power Interface
+ Format: { force | off | ht | strict | noirq }
force -- enable ACPI if default was off
off -- disable ACPI if default was on
noirq -- do not use ACPI for IRQ routing
ht -- run only enough ACPI to enable Hyper Threading
- strict -- Be less tolerant of platforms that are not
+ strict -- Be less tolerant of platforms that are not
strictly ACPI specification compliant.
See also Documentation/pm.txt, pci=noacpi
@@ -119,20 +119,23 @@ running once the system is up.
acpi_sleep= [HW,ACPI] Sleep options
Format: { s3_bios, s3_mode }
See Documentation/power/video.txt
-
+
acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode
- Format: { level | edge | high | low }
+ Format: { level | edge | high | low }
- acpi_irq_balance [HW,ACPI] ACPI will balance active IRQs
- default in APIC mode
+ acpi_irq_balance [HW,ACPI]
+ ACPI will balance active IRQs
+ default in APIC mode
- acpi_irq_nobalance [HW,ACPI] ACPI will not move active IRQs (default)
- default in PIC mode
+ acpi_irq_nobalance [HW,ACPI]
+ ACPI will not move active IRQs (default)
+ default in PIC mode
- acpi_irq_pci= [HW,ACPI] If irq_balance, Clear listed IRQs for use by PCI
+ acpi_irq_pci= [HW,ACPI] If irq_balance, clear listed IRQs for
+ use by PCI
Format: <irq>,<irq>...
- acpi_irq_isa= [HW,ACPI] If irq_balance, Mark listed IRQs used by ISA
+ acpi_irq_isa= [HW,ACPI] If irq_balance, mark listed IRQs used by ISA
Format: <irq>,<irq>...
acpi_osi= [HW,ACPI] empty param disables _OSI
@@ -145,14 +148,14 @@ running once the system is up.
acpi_dbg_layer= [HW,ACPI]
Format: <int>
- Each bit of the <int> indicates an acpi debug layer,
+ Each bit of the <int> indicates an ACPI debug layer,
1: enable, 0: disable. It is useful for boot time
debugging. After system has booted up, it can be set
via /proc/acpi/debug_layer.
acpi_dbg_level= [HW,ACPI]
Format: <int>
- Each bit of the <int> indicates an acpi debug level,
+ Each bit of the <int> indicates an ACPI debug level,
1: enable, 0: disable. It is useful for boot time
debugging. After system has booted up, it can be set
via /proc/acpi/debug_level.
@@ -161,12 +164,13 @@ running once the system is up.
acpi_generic_hotkey [HW,ACPI]
Allow consolidated generic hotkey driver to
- over-ride platform specific driver.
+ override platform specific driver.
See also Documentation/acpi-hotkey.txt.
enable_timer_pin_1 [i386,x86-64]
Enable PIN 1 of APIC timer
- Can be useful to work around chipset bugs (in particular on some ATI chipsets)
+ Can be useful to work around chipset bugs
+ (in particular on some ATI chipsets).
The kernel tries to set a reasonable default.
disable_timer_pin_1 [i386,x86-64]
@@ -182,7 +186,7 @@ running once the system is up.
adlib= [HW,OSS]
Format: <io>
-
+
advansys= [HW,SCSI]
See header of drivers/scsi/advansys.c.
@@ -192,7 +196,7 @@ running once the system is up.
aedsp16= [HW,OSS] Audio Excel DSP 16
Format: <io>,<irq>,<dma>,<mss_io>,<mpu_io>,<mpu_irq>
See also header of sound/oss/aedsp16.c.
-
+
aha152x= [HW,SCSI]
See Documentation/scsi/aha152x.txt.
@@ -205,10 +209,6 @@ running once the system is up.
aic79xx= [HW,SCSI]
See Documentation/scsi/aic79xx.txt.
- AM53C974= [HW,SCSI]
- Format: <host-scsi-id>,<target-scsi-id>,<max-rate>,<max-offset>
- See also header of drivers/scsi/AM53C974.c.
-
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
@@ -219,23 +219,24 @@ running once the system is up.
connected to one of 16 gameports
Format: <type1>,<type2>,..<type16>
- apc= [HW,SPARC] Power management functions (SPARCstation-4/5 + deriv.)
+ apc= [HW,SPARC]
+ Power management functions (SPARCstation-4/5 + deriv.)
Format: noidle
Disable APC CPU standby support. SPARCstation-Fox does
not play well with APC CPU idle - disable it if you have
APC and your system crashes randomly.
- apic= [APIC,i386] Change the output verbosity whilst booting
+ apic= [APIC,i386] Change the output verbosity whilst booting
Format: { quiet (default) | verbose | debug }
Change the amount of debugging information output
when initialising the APIC and IO-APIC components.
-
+
apm= [APM] Advanced Power Management
See header of arch/i386/kernel/apm.c.
applicom= [HW]
Format: <mem>,<irq>
-
+
arcrimi= [HW,NET] ARCnet - "RIM I" (entirely mem-mapped) cards
Format: <io>,<irq>,<nodeID>
@@ -250,38 +251,40 @@ running once the system is up.
atkbd.reset= [HW] Reset keyboard during initialization
- atkbd.set= [HW] Select keyboard code set
- Format: <int> (2 = AT (default) 3 = PS/2)
+ atkbd.set= [HW] Select keyboard code set
+ Format: <int> (2 = AT (default), 3 = PS/2)
atkbd.scroll= [HW] Enable scroll wheel on MS Office and similar
keyboards
atkbd.softraw= [HW] Choose between synthetic and real raw mode
Format: <bool> (0 = real, 1 = synthetic (default))
-
- atkbd.softrepeat=
- [HW] Use software keyboard repeat
+
+ atkbd.softrepeat= [HW]
+ Use software keyboard repeat
autotest [IA64]
awe= [HW,OSS] AWE32/SB32/AWE64 wave table synth
Format: <io>,<memsize>,<isapnp>
-
+
aztcd= [HW,CD] Aztech CD268 CDROM driver
Format: <io>,0x79 (?)
baycom_epp= [HW,AX25]
Format: <io>,<mode>
-
+
baycom_par= [HW,AX25] BayCom Parallel Port AX.25 Modem
Format: <io>,<mode>
See header of drivers/net/hamradio/baycom_par.c.
- baycom_ser_fdx= [HW,AX25] BayCom Serial Port AX.25 Modem (Full Duplex Mode)
+ baycom_ser_fdx= [HW,AX25]
+ BayCom Serial Port AX.25 Modem (Full Duplex Mode)
Format: <io>,<irq>,<mode>[,<baud>]
See header of drivers/net/hamradio/baycom_ser_fdx.c.
- baycom_ser_hdx= [HW,AX25] BayCom Serial Port AX.25 Modem (Half Duplex Mode)
+ baycom_ser_hdx= [HW,AX25]
+ BayCom Serial Port AX.25 Modem (Half Duplex Mode)
Format: <io>,<irq>,<mode>
See header of drivers/net/hamradio/baycom_ser_hdx.c.
@@ -292,7 +295,8 @@ running once the system is up.
blkmtd_count=
bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards)
- bttv.radio= Most important insmod options are available as kernel args too.
+ bttv.radio= Most important insmod options are available as
+ kernel args too.
bttv.pll= See Documentation/video4linux/bttv/Insmod-options
bttv.tuner= and Documentation/video4linux/bttv/CARDLIST
@@ -318,15 +322,17 @@ running once the system is up.
checkreqprot [SELINUX] Set initial checkreqprot flag value.
Format: { "0" | "1" }
See security/selinux/Kconfig help text.
- 0 -- check protection applied by kernel (includes any implied execute protection).
+ 0 -- check protection applied by kernel (includes
+ any implied execute protection).
1 -- check protection requested by application.
Default value is set via a kernel config option.
- Value can be changed at runtime via /selinux/checkreqprot.
-
- clock= [BUGS=IA-32, HW] gettimeofday timesource override.
+ Value can be changed at runtime via
+ /selinux/checkreqprot.
+
+ clock= [BUGS=IA-32,HW] gettimeofday timesource override.
Forces specified timesource (if available) to be used
- when calculating gettimeofday(). If specicified timesource
- is not avalible, it defaults to PIT.
+ when calculating gettimeofday(). If the specified
+ timesource is not available, it defaults to PIT.
Format: { pit | tsc | cyclone | pmtmr }
hpet= [IA-32,HPET] option to disable HPET and use PIT.
@@ -336,17 +342,19 @@ running once the system is up.
Format: { auto | [<io>,][<irq>] }
com20020= [HW,NET] ARCnet - COM20020 chipset
- Format: <io>[,<irq>[,<nodeID>[,<backplane>[,<ckp>[,<timeout>]]]]]
+ Format:
+ <io>[,<irq>[,<nodeID>[,<backplane>[,<ckp>[,<timeout>]]]]]
com90io= [HW,NET] ARCnet - COM90xx chipset (IO-mapped buffers)
Format: <io>[,<irq>]
- com90xx= [HW,NET] ARCnet - COM90xx chipset (memory-mapped buffers)
+ com90xx= [HW,NET]
+ ARCnet - COM90xx chipset (memory-mapped buffers)
Format: <io>[,<irq>[,<memstart>]]
condev= [HW,S390] console device
conmode=
-
+
console= [KNL] Output console device and options.
tty<n> Use the virtual console device <n>.
@@ -367,7 +375,8 @@ running once the system is up.
options are the same as for ttyS, above.
cpcihp_generic= [HW,PCI] Generic port I/O CompactPCI driver
- Format: <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
+ Format:
+ <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
cpia_pp= [HW,PPT]
Format: { parport<nr> | auto | none }
@@ -384,10 +393,10 @@ running once the system is up.
cs89x0_media= [HW,NET]
Format: { rj45 | aui | bnc }
-
+
cyclades= [HW,SERIAL] Cyclades multi-serial port adapter.
-
- dasd= [HW,NET]
+
+ dasd= [HW,NET]
See header of drivers/s390/block/dasd_devmap.c.
db9.dev[2|3]= [HW,JOY] Multisystem joystick support via parallel port
@@ -406,7 +415,7 @@ running once the system is up.
dhash_entries= [KNL]
Set number of hash buckets for dentry cache.
-
+
digi= [HW,SERIAL]
IO parameters + enable/disable command.
@@ -424,11 +433,11 @@ running once the system is up.
dtc3181e= [HW,SCSI]
- earlyprintk= [IA-32, X86-64]
+ earlyprintk= [IA-32,X86-64]
earlyprintk=vga
earlyprintk=serial[,ttySn[,baudrate]]
- Append ,keep to not disable it when the real console
+ Append ",keep" to not disable it when the real console
takes over.
Only vga or serial at a time, not both.
@@ -451,7 +460,7 @@ running once the system is up.
Format: {"of[f]" | "sk[ipmbr]"}
See comment in arch/i386/boot/edd.S
- eicon= [HW,ISDN]
+ eicon= [HW,ISDN]
Format: <id>,<membase>,<irq>
eisa_irq_edge= [PARISC,HW]
@@ -462,12 +471,13 @@ running once the system is up.
arch/i386/kernel/cpu/cpufreq/elanfreq.c.
elevator= [IOSCHED]
- Format: {"as"|"cfq"|"deadline"|"noop"}
- See Documentation/block/as-iosched.txt
- and Documentation/block/deadline-iosched.txt for details.
+ Format: {"as" | "cfq" | "deadline" | "noop"}
+ See Documentation/block/as-iosched.txt and
+ Documentation/block/deadline-iosched.txt for details.
+
elfcorehdr= [IA-32]
- Specifies physical address of start of kernel core image
- elf header.
+ Specifies physical address of start of kernel core
+ image elf header.
See Documentation/kdump.txt for details.
enforcing [SELINUX] Set initial enforcing status.
@@ -485,7 +495,7 @@ running once the system is up.
es1371= [HW,OSS]
Format: <spdif>,[<nomix>,[<amplifier>]]
See also header of sound/oss/es1371.c.
-
+
ether= [HW,NET] Ethernet cards parameters
This option is obsoleted by the "netdev=" option, which
has equivalent usage. See its documentation for details.
@@ -526,12 +536,13 @@ running once the system is up.
gus= [HW,OSS]
Format: <io>,<irq>,<dma>,<dma16>
-
+
gvp11= [HW,SCSI]
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for IA-64, off otherwise.
+ Format: 0 | 1 (for off | on)
hcl= [IA-64] SGI's Hardware Graph compatibility layer
@@ -595,13 +606,13 @@ running once the system is up.
ide?= [HW] (E)IDE subsystem
Format: ide?=noprobe or chipset specific parameters.
See Documentation/ide.txt.
-
+
idebus= [HW] (E)IDE subsystem - VLB/PCI bus speed
See Documentation/ide.txt.
idle= [HW]
Format: idle=poll or idle=halt
-
+
ihash_entries= [KNL]
Set number of hash buckets for inode cache.
@@ -649,7 +660,7 @@ running once the system is up.
firmware running.
isapnp= [ISAPNP]
- Format: <RDP>, <reset>, <pci_scan>, <verbosity>
+ Format: <RDP>,<reset>,<pci_scan>,<verbosity>
isolcpus= [KNL,SMP] Isolate CPUs from the general scheduler.
Format: <cpu number>,...,<cpu number>
@@ -661,32 +672,33 @@ running once the system is up.
"number of CPUs in system - 1".
This option is the preferred way to isolate CPUs. The
- alternative - manually setting the CPU mask of all tasks
- in the system can cause problems and suboptimal load
- balancer performance.
+ alternative -- manually setting the CPU mask of all
+ tasks in the system -- can cause problems and
+ suboptimal load balancer performance.
isp16= [HW,CD]
Format: <io>,<irq>,<dma>,<setup>
- iucv= [HW,NET]
+ iucv= [HW,NET]
js= [HW,JOY] Analog joystick
See Documentation/input/joystick.txt.
keepinitrd [HW,ARM]
- kstack=N [IA-32, X86-64] Print N words from the kernel stack
+ kstack=N [IA-32,X86-64] Print N words from the kernel stack
in oops dumps.
l2cr= [PPC]
- lapic [IA-32,APIC] Enable the local APIC even if BIOS disabled it.
+ lapic [IA-32,APIC] Enable the local APIC even if BIOS
+ disabled it.
lasi= [HW,SCSI] PARISC LASI driver for the 53c700 chip
Format: addr:<io>,irq:<irq>
- llsc*= [IA64]
- See function print_params() in arch/ia64/sn/kernel/llsc4.c.
+ llsc*= [IA64] See function print_params() in
+ arch/ia64/sn/kernel/llsc4.c.
load_ramdisk= [RAM] List of ramdisks to load from floppy
See Documentation/ramdisk.txt.
@@ -713,8 +725,9 @@ running once the system is up.
7 (KERN_DEBUG) debug-level messages
log_buf_len=n Sets the size of the printk ring buffer, in bytes.
- Format is n, nk, nM. n must be a power of two. The
- default is set in kernel config.
+ Format: { n | nk | nM }
+ n must be a power of two. The default size
+ is set in the kernel config file.
lp=0 [LP] Specify parallel ports to use, e.g,
lp=port[,port...] lp=none,parport0 (lp0 not configured, lp1 uses
@@ -750,23 +763,23 @@ running once the system is up.
ltpc= [NET]
Format: <io>,<irq>,<dma>
- mac5380= [HW,SCSI]
- Format: <can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
+ mac5380= [HW,SCSI] Format:
+ <can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
- mac53c9x= [HW,SCSI]
- Format: <num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
+ mac53c9x= [HW,SCSI] Format:
+ <num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
- machvec= [IA64]
- Force the use of a particular machine-vector (machvec) in a generic
- kernel. Example: machvec=hpzx1_swiotlb
+ machvec= [IA64] Force the use of a particular machine-vector
+ (machvec) in a generic kernel.
+ Example: machvec=hpzx1_swiotlb
- mad16= [HW,OSS]
- Format: <io>,<irq>,<dma>,<dma16>,<mpu_io>,<mpu_irq>,<joystick>
+ mad16= [HW,OSS] Format:
+ <io>,<irq>,<dma>,<dma16>,<mpu_io>,<mpu_irq>,<joystick>
maui= [HW,OSS]
Format: <io>,<irq>
-
- max_loop= [LOOP] Maximum number of loopback devices that can
+
+ max_loop= [LOOP] Maximum number of loopback devices that can
be mounted
Format: <1-256>
@@ -776,11 +789,11 @@ running once the system is up.
max_addr=[KMG] [KNL,BOOT,ia64] All physical memory greater than or
equal to this physical address is ignored.
- max_luns= [SCSI] Maximum number of LUNs to probe
+ max_luns= [SCSI] Maximum number of LUNs to probe.
Should be between 1 and 2^32-1.
max_report_luns=
- [SCSI] Maximum number of LUNs received
+ [SCSI] Maximum number of LUNs received.
Should be between 1 and 16384.
mca-pentium [BUGS=IA-32]
@@ -796,11 +809,11 @@ running once the system is up.
md= [HW] RAID subsystems devices and level
See Documentation/md.txt.
-
+
mdacon= [MDA]
Format: <first>,<last>
Specifies range of consoles to be captured by the MDA.
-
+
mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
Amount of memory to be used when the kernel is not able
to see the whole system memory or for test.
@@ -851,15 +864,15 @@ running once the system is up.
MTD_Partition= [MTD]
Format: <name>,<region-number>,<size>,<offset>
- MTD_Region= [MTD]
- Format: <name>,<region-number>[,<base>,<size>,<buswidth>,<altbuswidth>]
+ MTD_Region= [MTD] Format:
+ <name>,<region-number>[,<base>,<size>,<buswidth>,<altbuswidth>]
mtdparts= [MTD]
See drivers/mtd/cmdline.c.
mtouchusb.raw_coordinates=
- [HW] Make the MicroTouch USB driver use raw coordinates ('y', default)
- or cooked coordinates ('n')
+ [HW] Make the MicroTouch USB driver use raw coordinates
+ ('y', default) or cooked coordinates ('n')
n2= [NET] SDL Inc. RISCom/N2 synchronous serial card
@@ -880,7 +893,9 @@ running once the system is up.
Format: <irq>,<io>,<mem_start>,<mem_end>,<name>
Note that mem_start is often overloaded to mean
something different and driver-specific.
-
+ This usage is documented, if at all, only in each
+ driver's source file.
+
nfsaddrs= [NFS]
See Documentation/nfsroot.txt.
@@ -893,8 +908,8 @@ running once the system is up.
emulation library even if a 387 maths coprocessor
is present.
- noalign [KNL,ARM]
-
+ noalign [KNL,ARM]
+
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
@@ -905,19 +920,19 @@ running once the system is up.
on "Classic" PPC cores.
nocache [ARM]
-
+
nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects.
noexec [IA-64]
- noexec [IA-32, X86-64]
+ noexec [IA-32,X86-64]
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings
nofxsr [BUGS=IA-32]
nohlt [BUGS=ARM]
-
+
no-hlt [BUGS=IA-32] Tells the kernel that the hlt
instruction doesn't work correctly and not to
use it.
@@ -948,8 +963,9 @@ running once the system is up.
noresidual [PPC] Don't use residual data on PReP machines.
- noresume [SWSUSP] Disables resume and restore original swap space.
-
+ noresume [SWSUSP] Disables resume and restores original swap
+ space.
+
no-scroll [VGA] Disables scrollback.
This is required for the Braillex ib80-piezo Braille
reader made by F.H. Papenmeier (Germany).
@@ -965,16 +981,16 @@ running once the system is up.
nousb [USB] Disable the USB subsystem
nowb [ARM]
-
+
opl3= [HW,OSS]
Format: <io>
opl3sa= [HW,OSS]
Format: <io>,<irq>,<dma>,<dma2>,<mpu_io>,<mpu_irq>
- opl3sa2= [HW,OSS]
- Format: <io>,<irq>,<dma>,<dma2>,<mss_io>,<mpu_io>,<ymode>,<loopback>[,<isapnp>,<multiple]
-
+ opl3sa2= [HW,OSS] Format:
+ <io>,<irq>,<dma>,<dma2>,<mss_io>,<mpu_io>,<ymode>,<loopback>[,<isapnp>,<multiple>]
+
oprofile.timer= [HW]
Use timer interrupt instead of performance counters
@@ -993,36 +1009,33 @@ running once the system is up.
Format: <parport#>
parkbd.mode= [HW] Parallel port keyboard adapter mode of operation,
0 for XT, 1 for AT (default is AT).
- Format: <mode>
-
- parport=0 [HW,PPT] Specify parallel ports. 0 disables.
- parport=auto Use 'auto' to force the driver to use
- parport=0xBBB[,IRQ[,DMA]] any IRQ/DMA settings detected (the
- default is to ignore detected IRQ/DMA
- settings because of possible
- conflicts). You can specify the base
- address, IRQ, and DMA settings; IRQ and
- DMA should be numbers, or 'auto' (for
- using detected settings on that
- particular port), or 'nofifo' (to avoid
- using a FIFO even if it is detected).
- Parallel ports are assigned in the
- order they are specified on the command
- line, starting with parport0.
-
- parport_init_mode=
- [HW,PPT] Configure VIA parallel port to
- operate in specific mode. This is
- necessary on Pegasos computer where
- firmware has no options for setting up
- parallel port mode and sets it to
- spp. Currently this function knows
- 686a and 8231 chips.
+ Format: <mode>
+
+ parport= [HW,PPT] Specify parallel ports. 0 disables.
+ Format: { 0 | auto | 0xBBB[,IRQ[,DMA]] }
+ Use 'auto' to force the driver to use any
+ IRQ/DMA settings detected (the default is to
+ ignore detected IRQ/DMA settings because of
+ possible conflicts). You can specify the base
+ address, IRQ, and DMA settings; IRQ and DMA
+ should be numbers, or 'auto' (for using detected
+ settings on that particular port), or 'nofifo'
+ (to avoid using a FIFO even if it is detected).
+ Parallel ports are assigned in the order they
+ are specified on the command line, starting
+ with parport0.
+
+ parport_init_mode= [HW,PPT]
+ Configure VIA parallel port to operate in
+ a specific mode. This is necessary on Pegasos
+ computer where firmware has no options for setting
+ up parallel port mode and sets it to spp.
+ Currently this function knows 686a and 8231 chips.
Format: [spp|ps2|epp|ecp|ecpepp]
- pas2= [HW,OSS]
- Format: <io>,<irq>,<dma>,<dma16>,<sb_io>,<sb_irq>,<sb_dma>,<sb_dma16>
-
+ pas2= [HW,OSS] Format:
+ <io>,<irq>,<dma>,<dma16>,<sb_io>,<sb_irq>,<sb_dma>,<sb_dma16>
+
pas16= [HW,SCSI]
See header of drivers/scsi/pas16.c.
@@ -1032,64 +1045,67 @@ running once the system is up.
See header of drivers/block/paride/pcd.c.
See also Documentation/paride.txt.
- pci=option[,option...] [PCI] various PCI subsystem options:
- off [IA-32] don't probe for the PCI bus
- bios [IA-32] force use of PCI BIOS, don't access
- the hardware directly. Use this if your machine
- has a non-standard PCI host bridge.
- nobios [IA-32] disallow use of PCI BIOS, only direct
- hardware access methods are allowed. Use this
- if you experience crashes upon bootup and you
- suspect they are caused by the BIOS.
- conf1 [IA-32] Force use of PCI Configuration Mechanism 1.
- conf2 [IA-32] Force use of PCI Configuration Mechanism 2.
- nosort [IA-32] Don't sort PCI devices according to
- order given by the PCI BIOS. This sorting is done
- to get a device order compatible with older kernels.
- biosirq [IA-32] Use PCI BIOS calls to get the interrupt
- routing table. These calls are known to be buggy
- on several machines and they hang the machine when used,
- but on other computers it's the only way to get the
- interrupt routing table. Try this option if the kernel
- is unable to allocate IRQs or discover secondary PCI
- buses on your motherboard.
- rom [IA-32] Assign address space to expansion ROMs.
- Use with caution as certain devices share address
- decoders between ROMs and other resources.
- irqmask=0xMMMM [IA-32] Set a bit mask of IRQs allowed to be assigned
- automatically to PCI devices. You can make the kernel
- exclude IRQs of your ISA cards this way.
+ pci=option[,option...] [PCI] various PCI subsystem options:
+ off [IA-32] don't probe for the PCI bus
+ bios [IA-32] force use of PCI BIOS, don't access
+ the hardware directly. Use this if your machine
+ has a non-standard PCI host bridge.
+ nobios [IA-32] disallow use of PCI BIOS, only direct
+ hardware access methods are allowed. Use this
+ if you experience crashes upon bootup and you
+ suspect they are caused by the BIOS.
+ conf1 [IA-32] Force use of PCI Configuration
+ Mechanism 1.
+ conf2 [IA-32] Force use of PCI Configuration
+ Mechanism 2.
+ nosort [IA-32] Don't sort PCI devices according to
+ order given by the PCI BIOS. This sorting is
+ done to get a device order compatible with
+ older kernels.
+ biosirq [IA-32] Use PCI BIOS calls to get the interrupt
+ routing table. These calls are known to be buggy
+ on several machines and they hang the machine
+ when used, but on other computers it's the only
+ way to get the interrupt routing table. Try
+ this option if the kernel is unable to allocate
+ IRQs or discover secondary PCI buses on your
+ motherboard.
+ rom [IA-32] Assign address space to expansion ROMs.
+ Use with caution as certain devices share
+ address decoders between ROMs and other
+ resources.
+ irqmask=0xMMMM [IA-32] Set a bit mask of IRQs allowed to be
+ assigned automatically to PCI devices. You can
+ make the kernel exclude IRQs of your ISA cards
+ this way.
pirqaddr=0xAAAAA [IA-32] Specify the physical address
- of the PIRQ table (normally generated
- by the BIOS) if it is outside the
- F0000h-100000h range.
- lastbus=N [IA-32] Scan all buses till bus #N. Can be useful
- if the kernel is unable to find your secondary buses
- and you want to tell it explicitly which ones they are.
- assign-busses [IA-32] Always assign all PCI bus
- numbers ourselves, overriding
- whatever the firmware may have
- done.
- usepirqmask [IA-32] Honor the possible IRQ mask
- stored in the BIOS $PIR table. This is
- needed on some systems with broken
- BIOSes, notably some HP Pavilion N5400
- and Omnibook XE3 notebooks. This will
- have no effect if ACPI IRQ routing is
- enabled.
- noacpi [IA-32] Do not use ACPI for IRQ routing
- or for PCI scanning.
- routeirq Do IRQ routing for all PCI devices.
- This is normally done in pci_enable_device(),
- so this option is a temporary workaround
- for broken drivers that don't call it.
-
- firmware [ARM] Do not re-enumerate the bus but
- instead just use the configuration
- from the bootloader. This is currently
- used on IXP2000 systems where the
- bus has to be configured a certain way
- for adjunct CPUs.
+ of the PIRQ table (normally generated
+ by the BIOS) if it is outside the
+ F0000h-100000h range.
+ lastbus=N [IA-32] Scan all buses through bus #N. Can be
+ useful if the kernel is unable to find your
+ secondary buses and you want to tell it
+ explicitly which ones they are.
+ assign-busses [IA-32] Always assign all PCI bus
+ numbers ourselves, overriding
+ whatever the firmware may have done.
+ usepirqmask [IA-32] Honor the possible IRQ mask stored
+ in the BIOS $PIR table. This is needed on
+ some systems with broken BIOSes, notably
+ some HP Pavilion N5400 and Omnibook XE3
+ notebooks. This will have no effect if ACPI
+ IRQ routing is enabled.
+ noacpi [IA-32] Do not use ACPI for IRQ routing
+ or for PCI scanning.
+ routeirq Do IRQ routing for all PCI devices.
+ This is normally done in pci_enable_device(),
+ so this option is a temporary workaround
+ for broken drivers that don't call it.
+ firmware [ARM] Do not re-enumerate the bus but instead
+ just use the configuration from the
+ bootloader. This is currently used on
+ IXP2000 systems where the bus has to be
+ configured a certain way for adjunct CPUs.
pcmv= [HW,PCMCIA] BadgePAD 4
@@ -1127,19 +1143,20 @@ running once the system is up.
[ISAPNP] Exclude DMAs for the autoconfiguration
pnp_reserve_io= [ISAPNP] Exclude I/O ports for the autoconfiguration
- Ranges are in pairs (I/O port base and size).
+ Ranges are in pairs (I/O port base and size).
pnp_reserve_mem=
- [ISAPNP] Exclude memory regions for the autoconfiguration
+ [ISAPNP] Exclude memory regions for the
+ autoconfiguration.
Ranges are in pairs (memory base and size).
profile= [KNL] Enable kernel profiling via /proc/profile
- { schedule | <number> }
- (param: schedule - profile schedule points}
- (param: profile step/bucket size as a power of 2 for
- statistical time based profiling)
+ Format: [schedule,]<number>
+ Param: "schedule" - profile schedule points.
+ Param: <number> - step/bucket size as a power of 2 for
+ statistical time based profiling.
- processor.max_cstate= [HW, ACPI]
+ processor.max_cstate= [HW,ACPI]
Limit processor to maximum C-state
max_cstate=9 overrides any DMI blacklist limit.
@@ -1147,27 +1164,28 @@ running once the system is up.
before loading.
See Documentation/ramdisk.txt.
- psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
- probe for (bare|imps|exps|lifebook|any).
+ psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
+ probe for; one of (bare|imps|exps|lifebook|any).
psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports
per second.
- psmouse.resetafter=
- [HW,MOUSE] Try to reset the device after so many bad packets
+ psmouse.resetafter= [HW,MOUSE]
+ Try to reset the device after so many bad packets
(0 = never).
psmouse.resolution=
[HW,MOUSE] Set desired mouse resolution, in dpi.
psmouse.smartscroll=
- [HW,MOUSE] Controls Logitech smartscroll autorepeat,
+ [HW,MOUSE] Controls Logitech smartscroll autorepeat.
0 = disabled, 1 = enabled (default).
pss= [HW,OSS] Personal Sound System (ECHO ESC614)
- Format: <io>,<mss_io>,<mss_irq>,<mss_dma>,<mpu_io>,<mpu_irq>
+ Format:
+ <io>,<mss_io>,<mss_irq>,<mss_dma>,<mpu_io>,<mpu_irq>
pt. [PARIDE]
See Documentation/paride.txt.
quiet= [KNL] Disable log messages
-
+
r128= [HW,DRM]
raid= [HW,RAID]
@@ -1176,10 +1194,9 @@ running once the system is up.
ramdisk= [RAM] Sizes of RAM disks in kilobytes [deprecated]
See Documentation/ramdisk.txt.
- ramdisk_blocksize=
- [RAM]
+ ramdisk_blocksize= [RAM]
See Documentation/ramdisk.txt.
-
+
ramdisk_size= [RAM] Sizes of RAM disks in kilobytes
New name for the ramdisk parameter.
See Documentation/ramdisk.txt.
@@ -1195,7 +1212,8 @@ running once the system is up.
reserve= [KNL,BUGS] Force the kernel to ignore some iomem area
- resume= [SWSUSP] Specify the partition device for software suspension
+ resume= [SWSUSP]
+ Specify the partition device for software suspend
rhash_entries= [KNL,NET]
Set number of hash buckets for route cache
@@ -1225,7 +1243,7 @@ running once the system is up.
Format: <io>,<irq>,<dma>,<dma2>
sbni= [NET] Granch SBNI12 leased line adapter
-
+
sbpcd= [HW,CD] Soundblaster CD adapter
Format: <io>,<type>
See a comment before function sbpcd_setup() in
@@ -1258,21 +1276,20 @@ running once the system is up.
serialnumber [BUGS=IA-32]
- sg_def_reserved_size=
- [SCSI]
-
+ sg_def_reserved_size= [SCSI]
+
sgalaxy= [HW,OSS]
Format: <io>,<irq>,<dma>,<dma2>,<sgbase>
shapers= [NET]
Maximal number of shapers.
-
+
sim710= [SCSI,HW]
See header of drivers/scsi/sim710.c.
simeth= [IA-64]
simscsi=
-
+
sjcd= [HW,CD]
Format: <io>,<irq>,<dma>
See header of drivers/cdrom/sjcd.c.
@@ -1403,10 +1420,10 @@ running once the system is up.
snd-wavefront= [HW,ALSA]
snd-ymfpci= [HW,ALSA]
-
+
sonicvibes= [HW,OSS]
Format: <reverb>
-
+
sonycd535= [HW,CD]
Format: <io>[,<irq>]
@@ -1423,7 +1440,7 @@ running once the system is up.
sscape= [HW,OSS]
Format: <io>,<irq>,<dma>,<mpu_io>,<mpu_irq>
-
+
st= [HW,SCSI] SCSI tape parameters (buffers, etc.)
See Documentation/scsi/st.txt.
@@ -1446,7 +1463,7 @@ running once the system is up.
stram_swap= [HW,M68k]
swiotlb= [IA-64] Number of I/O TLB slabs
-
+
switches= [HW,M68k]
sym53c416= [HW,SCSI]
@@ -1479,14 +1496,16 @@ running once the system is up.
tp720= [HW,PS2]
trix= [HW,OSS] MediaTrix AudioTrix Pro
- Format: <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq>
-
+ Format:
+ <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq>
+
tsdev.xres= [TS] Horizontal screen resolution.
tsdev.yres= [TS] Vertical screen resolution.
- turbografx.map[2|3]=
- [HW,JOY] TurboGraFX parallel port interface
- Format: <port#>,<js1>,<js2>,<js3>,<js4>,<js5>,<js6>,<js7>
+ turbografx.map[2|3]= [HW,JOY]
+ TurboGraFX parallel port interface
+ Format:
+ <port#>,<js1>,<js2>,<js3>,<js4>,<js5>,<js6>,<js7>
See also Documentation/input/joystick-parport.txt
u14-34f= [HW,SCSI] UltraStor 14F/34F SCSI host adapter
@@ -1502,17 +1521,18 @@ running once the system is up.
usbhid.mousepoll=
			[USBHID] The interval at which mice are to be polled.
-
+
video= [FB] Frame buffer configuration
See Documentation/fb/modedb.txt.
vga= [BOOT,IA-32] Select a particular video mode
- See Documentation/i386/boot.txt and Documentation/svga.txt.
+ See Documentation/i386/boot.txt and
+ Documentation/svga.txt.
Use vga=ask for menu.
This is actually a boot loader parameter; the value is
passed to the kernel using a special protocol.
- vmalloc=nn[KMG] [KNL,BOOT] forces the vmalloc area to have an exact
+ vmalloc=nn[KMG] [KNL,BOOT] Forces the vmalloc area to have an exact
size of <nn>. This can be used to increase the
minimum size (128MB on x86). It can also be used to
decrease the size and leave more room for directly
@@ -1520,11 +1540,11 @@ running once the system is up.
vmhalt= [KNL,S390]
- vmpoff= [KNL,S390]
-
+ vmpoff= [KNL,S390]
+
waveartist= [HW,OSS]
Format: <io>,<irq>,<dma>,<dma2>
-
+
wd33c93= [HW,SCSI]
See header of drivers/scsi/wd33c93.c.
@@ -1538,21 +1558,25 @@ running once the system is up.
xd_geo= See header of drivers/block/xd.c.
xirc2ps_cs= [NET,PCMCIA]
- Format: <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
-
+ Format:
+ <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+______________________________________________________________________
Changelog:
+2000-06-?? Mr. Unknown
The last known update (for 2.4.0) - the changelog was not kept before.
- 2000-06-?? Mr. Unknown
+2002-11-24 Petr Baudis <pasky@ucw.cz>
+ Randy Dunlap <randy.dunlap@verizon.net>
Update for 2.5.49, description for most of the options introduced,
references to other documentation (C files, READMEs, ..), added S390,
PPC, SPARC, MTD, ALSA and OSS category. Minor corrections and
reformatting.
- 2002-11-24 Petr Baudis <pasky@ucw.cz>
- Randy Dunlap <randy.dunlap@verizon.net>
+
+2005-10-19 Randy Dunlap <rdunlap@xenotime.net>
+ Lots of typos, whitespace, some reformatting.
TODO:
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index a55f0f95b17..b0fe41da007 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -777,7 +777,7 @@ doing so is the same as described in the "Configuring Multiple Bonds
Manually" section, below.
NOTE: It has been observed that some Red Hat supplied kernels
-are apparently unable to rename modules at load time (the "-obonding1"
+are apparently unable to rename modules at load time (the "-o bond1"
part). Attempts to pass that option to modprobe will produce an
"Operation not permitted" error. This has been reported on some
Fedora Core kernels, and has been seen on RHEL 4 as well. On kernels
@@ -883,7 +883,8 @@ the above does not work, and the second bonding instance never sees
its options. In that case, the second options line can be substituted
as follows:
-install bonding1 /sbin/modprobe bonding -obond1 mode=balance-alb miimon=50
+install bond1 /sbin/modprobe --ignore-install bonding -o bond1 \
+ mode=balance-alb miimon=50
This may be repeated any number of times, specifying a new and
unique name in place of bond1 for each subsequent instance.
diff --git a/Makefile b/Makefile
index 4a7000e353b..f1d121f2302 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 14
-EXTRAVERSION =-rc5
+EXTRAVERSION =
NAME=Affluent Albatross
# *DOCUMENTATION*
@@ -334,7 +334,7 @@ KALLSYMS = scripts/kallsyms
PERL = perl
CHECK = sparse
-CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ $(CF)
+CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ -Wbitwise $(CF)
MODFLAGS = -DMODULE
CFLAGS_MODULE = $(MODFLAGS)
AFLAGS_MODULE = $(MODFLAGS)
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 582a3519fb2..9903e3a7910 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -154,7 +154,7 @@ pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
void *
dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int gfp)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
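A minimal sketch of the caller side of the int -> gfp_t conversion that runs through these DMA hunks, assuming only the converted dma_alloc_coherent() prototype shown above; the helper name is made up, not from the patch. With the typed parameter, sparse (using the -Wbitwise flag added in the Makefile hunk) can flag callers that pass a plain int instead of a gfp_t.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	/* Illustrative only: GFP_KERNEL is a gfp_t, matching the new prototype. */
	static void *example_coherent_alloc(struct device *dev, size_t len,
					    dma_addr_t *bus_addr)
	{
		return dma_alloc_coherent(dev, len, bus_addr, GFP_KERNEL);
	}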
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 7cb23f12ecb..c468e312e5f 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -397,7 +397,7 @@ pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
void *cpu_addr;
long order = get_order(size);
- int gfp = GFP_ATOMIC;
+ gfp_t gfp = GFP_ATOMIC;
try_again:
cpu_addr = (void *)__get_free_pages(gfp, order);
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index c3c2f17d030..a1b153d1626 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -67,7 +67,7 @@ static void impd1_setvco(struct clk *clk, struct icst525_vco vco)
}
writel(0, impd1->base + IMPD1_LOCK);
-#if DEBUG
+#ifdef DEBUG
vco.v = val & 0x1ff;
vco.r = (val >> 9) & 0x7f;
vco.s = (val >> 16) & 7;
@@ -427,17 +427,18 @@ static int impd1_probe(struct lm_device *dev)
return ret;
}
+static int impd1_remove_one(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ return 0;
+}
+
static void impd1_remove(struct lm_device *dev)
{
struct impd1_module *impd1 = lm_get_drvdata(dev);
- struct list_head *l, *n;
int i;
- list_for_each_safe(l, n, &dev->dev.children) {
- struct device *d = list_to_dev(l);
-
- device_unregister(d);
- }
+ device_for_each_child(&dev->dev, NULL, impd1_remove_one);
for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++)
clk_unregister(&impd1->vcos[i]);
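The impd1_remove() change above is an instance of the generic driver-core iteration helper: instead of walking dev->children by hand, a callback runs once per child and stops the walk if it returns nonzero. A minimal sketch of the pattern, with hypothetical function names:

	#include <linux/device.h>

	static int unregister_child(struct device *child, void *unused)
	{
		device_unregister(child);
		return 0;			/* 0 = keep iterating */
	}

	static void remove_all_children(struct device *parent)
	{
		/* replaces an open-coded walk of parent->children */
		device_for_each_child(parent, NULL, unregister_child);
	}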
diff --git a/arch/arm/mach-pxa/corgi_lcd.c b/arch/arm/mach-pxa/corgi_lcd.c
index 850538fadec..370df113dc0 100644
--- a/arch/arm/mach-pxa/corgi_lcd.c
+++ b/arch/arm/mach-pxa/corgi_lcd.c
@@ -488,6 +488,7 @@ static int is_pxafb_device(struct device * dev, void * data)
unsigned long spitz_get_hsync_len(void)
{
+#ifdef CONFIG_FB_PXA
if (!spitz_pxafb_dev) {
spitz_pxafb_dev = bus_find_device(&platform_bus_type, NULL, NULL, is_pxafb_device);
if (!spitz_pxafb_dev)
@@ -496,6 +497,7 @@ unsigned long spitz_get_hsync_len(void)
if (!get_hsync_time)
get_hsync_time = symbol_get(pxafb_get_hsync_time);
if (!get_hsync_time)
+#endif
return 0;
return pxafb_get_hsync_time(spitz_pxafb_dev);
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index d327c127edd..1d7677669a7 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -250,6 +250,25 @@ void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info)
i2c_device.dev.platform_data = info;
}
+static struct resource i2s_resources[] = {
+ {
+ .start = 0x40400000,
+ .end = 0x40400083,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_I2S,
+ .end = IRQ_I2S,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device i2s_device = {
+ .name = "pxa2xx-i2s",
+ .id = -1,
+	.resource	= i2s_resources,
+ .num_resources = ARRAY_SIZE(i2s_resources),
+};
+
static struct platform_device *devices[] __initdata = {
&pxamci_device,
&udc_device,
@@ -258,6 +277,7 @@ static struct platform_device *devices[] __initdata = {
&btuart_device,
&stuart_device,
&i2c_device,
+ &i2s_device,
};
static int __init pxa_init(void)
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index 8ca95598464..7b51bfd0ba6 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -307,9 +307,9 @@ static void bast_nand_select(struct s3c2410_nand_set *set, int slot)
}
static struct s3c2410_platform_nand bast_nand_info = {
- .tacls = 40,
- .twrph0 = 80,
- .twrph1 = 80,
+ .tacls = 30,
+ .twrph0 = 60,
+ .twrph1 = 60,
.nr_sets = ARRAY_SIZE(bast_nand_sets),
.sets = bast_nand_sets,
.select_chip = bast_nand_select,
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 26356ce4da5..82f4d5e27c5 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -75,7 +75,7 @@ static struct vm_region consistent_head = {
};
static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, int gfp)
+vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
unsigned long addr = head->vm_start, end = head->vm_end - size;
unsigned long flags;
@@ -133,7 +133,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
#endif
static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
pgprot_t prot)
{
struct page *page;
@@ -251,7 +251,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp,
* virtual and bus address for that space.
*/
void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
return __dma_alloc(dev, size, handle, gfp,
pgprot_noncached(pgprot_kernel));
@@ -263,7 +263,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
* dma_alloc_coherent above.
*/
void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
return __dma_alloc(dev, size, handle, gfp,
pgprot_writecombine(pgprot_kernel));
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
index 819895cf0b9..2082a9647f4 100644
--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
+++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -33,7 +33,7 @@ struct dma_alloc_record {
static DEFINE_SPINLOCK(dma_alloc_lock);
static LIST_HEAD(dma_alloc_list);
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp)
+void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
struct dma_alloc_record *new;
struct list_head *this = &dma_alloc_list;
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 27eb1206650..86fbdadc51b 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -17,7 +17,7 @@
#include <linux/highmem.h>
#include <asm/io.h>
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp)
+void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index 4b38d45435f..cfc4f97490c 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -81,7 +81,7 @@ static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
* portions of the kernel with single large page TLB entries, and
* still get unique uncached pages for consistent DMA.
*/
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
struct vm_struct *area;
unsigned long page, va, pa;
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index ab6e0611303..58ca98fdc2c 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -44,7 +44,7 @@
#define PFX "powernow-k8: "
#define BFX PFX "BIOS error: "
-#define VERSION "version 1.50.3"
+#define VERSION "version 1.50.4"
#include "powernow-k8.h"
/* serialize freq changes */
@@ -111,8 +111,8 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
u32 i = 0;
do {
- if (i++ > 0x1000000) {
- printk(KERN_ERR PFX "detected change pending stuck\n");
+ if (i++ > 10000) {
+ dprintk("detected change pending stuck\n");
return 1;
}
rdmsr(MSR_FIDVID_STATUS, lo, hi);
@@ -159,6 +159,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
{
u32 lo;
u32 savevid = data->currvid;
+ u32 i = 0;
if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
printk(KERN_ERR PFX "internal error - overflow on fid write\n");
@@ -170,10 +171,13 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
fid, lo, data->plllock * PLL_LOCK_CONVERSION);
- wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
-
- if (query_current_values_with_pending_wait(data))
- return 1;
+ do {
+ wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
+ if (i++ > 100) {
+ printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
+ return 1;
+ }
+ } while (query_current_values_with_pending_wait(data));
count_off_irt(data);
@@ -197,6 +201,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
{
u32 lo;
u32 savefid = data->currfid;
+ int i = 0;
if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
printk(KERN_ERR PFX "internal error - overflow on vid write\n");
@@ -208,10 +213,13 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
vid, lo, STOP_GRANT_5NS);
- wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
-
- if (query_current_values_with_pending_wait(data))
- return 1;
+ do {
+ wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
+ if (i++ > 100) {
+ printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
+ return 1;
+ }
+ } while (query_current_values_with_pending_wait(data));
if (savefid != data->currfid) {
printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n",
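Both write_new_fid() and write_new_vid() now retry the MSR write while the pending bit stays set, giving up after 100 attempts instead of failing on the first stuck poll. A minimal sketch of that retry pattern, assuming the MSR constant and pending-wait helper from powernow-k8.h/.c as in the hunks above; the wrapper name is hypothetical.

	static int write_fidvid_until_not_pending(struct powernow_k8_data *data,
						  u32 lo, u32 hi)
	{
		int tries = 0;

		do {
			wrmsr(MSR_FIDVID_CTL, lo, hi);
			if (tries++ > 100)
				return 1;	/* pending bit stuck, give up */
		} while (query_current_values_with_pending_wait(data));

		return 0;			/* write took effect */
	}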
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 80f8ef01393..1ba02baf2f9 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -71,7 +71,7 @@ hwsw_init (void)
}
void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags)
+hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
if (use_swiotlb(dev))
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 11957598a8b..21bffba78b6 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1076,7 +1076,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
* See Documentation/DMA-mapping.txt
*/
void *
-sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags)
+sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
struct ioc *ioc;
void *addr;
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index a604efc7f6c..3ebbb3c8ba3 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -314,7 +314,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, int flags)
+ dma_addr_t *dma_handle, gfp_t flags)
{
unsigned long dev_addr;
void *ret;
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
index d0ee635daf2..e5f5a4e51f7 100644
--- a/arch/ia64/sn/kernel/xpc.h
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -939,7 +939,7 @@ xpc_map_bte_errors(bte_result_t error)
static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base)
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kmalloc will give us cacheline aligned memory by default */
*base = kmalloc(size, flags);
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 0e4b9ad9ef0..75e6e874beb 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(sn_dma_set_mask);
* more information.
*/
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int flags)
+ dma_addr_t * dma_handle, gfp_t flags)
{
void *cpuaddr;
unsigned long phys_addr;
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index 97a50d38c98..a617f8c327e 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -18,7 +18,7 @@
#include <asm/io.h>
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
/* ignore region specifiers */
@@ -39,7 +39,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
__attribute__((alias("dma_alloc_noncoherent")));
EXPORT_SYMBOL(dma_alloc_coherent);
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
index aa7c94b5d78..8da19fd22ac 100644
--- a/arch/mips/mm/dma-ip27.c
+++ b/arch/mips/mm/dma-ip27.c
@@ -22,7 +22,7 @@
pdev_to_baddr(to_pci_dev(dev), (addr))
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
@@ -44,7 +44,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
__attribute__((alias("dma_alloc_noncoherent")));
EXPORT_SYMBOL(dma_alloc_coherent);
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index 2cbe196c35f..a7e3072ff78 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -37,7 +37,7 @@
#define RAM_OFFSET_MASK 0x3fffffff
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
/* ignore region specifiers */
@@ -61,7 +61,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 59e54f12212..4ce02028a29 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -24,7 +24,7 @@
*/
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
/* ignore region specifiers */
@@ -45,7 +45,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 368cc095c99..844c2877a2e 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -349,7 +349,7 @@ pcxl_dma_init(void)
__initcall(pcxl_dma_init);
-static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
+static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
unsigned long vaddr;
unsigned long paddr;
@@ -502,13 +502,13 @@ struct hppa_dma_ops pcxl_dma_ops = {
};
static void *fail_alloc_consistent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
return NULL;
}
static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
void *addr = NULL;
diff --git a/arch/ppc/8xx_io/cs4218.h b/arch/ppc/8xx_io/cs4218.h
index a3c38c5a5db..f1c7392255f 100644
--- a/arch/ppc/8xx_io/cs4218.h
+++ b/arch/ppc/8xx_io/cs4218.h
@@ -78,7 +78,7 @@ typedef struct {
const char *name2;
void (*open)(void);
void (*release)(void);
- void *(*dma_alloc)(unsigned int, int);
+ void *(*dma_alloc)(unsigned int, gfp_t);
void (*dma_free)(void *, unsigned int);
int (*irqinit)(void);
#ifdef MODULE
diff --git a/arch/ppc/8xx_io/cs4218_tdm.c b/arch/ppc/8xx_io/cs4218_tdm.c
index 2ca9ec7ec3a..532caa388dc 100644
--- a/arch/ppc/8xx_io/cs4218_tdm.c
+++ b/arch/ppc/8xx_io/cs4218_tdm.c
@@ -318,7 +318,7 @@ struct cs_sound_settings {
static struct cs_sound_settings sound;
-static void *CS_Alloc(unsigned int size, int flags);
+static void *CS_Alloc(unsigned int size, gfp_t flags);
static void CS_Free(void *ptr, unsigned int size);
static int CS_IrqInit(void);
#ifdef MODULE
@@ -959,7 +959,7 @@ static TRANS transCSNormalRead = {
/*** Low level stuff *********************************************************/
-static void *CS_Alloc(unsigned int size, int flags)
+static void *CS_Alloc(unsigned int size, gfp_t flags)
{
int order;
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c
index 8edee806dae..0f710d2baec 100644
--- a/arch/ppc/kernel/dma-mapping.c
+++ b/arch/ppc/kernel/dma-mapping.c
@@ -115,7 +115,7 @@ static struct vm_region consistent_head = {
};
static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, int gfp)
+vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
unsigned long addr = head->vm_start, end = head->vm_end - size;
unsigned long flags;
@@ -173,7 +173,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
* virtual and bus address for that space.
*/
void *
-__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
+__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
struct page *page;
struct vm_region *c;
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index 81a3d7446d3..43505b1fc5d 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -114,9 +114,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
struct page *ptepage;
#ifdef CONFIG_HIGHPTE
- int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
- int flags = GFP_KERNEL | __GFP_REPEAT;
+ gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
#endif
ptepage = alloc_pages(flags, 0);
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/ppc64/kernel/iSeries_htab.c
index 2192055a90a..073b7666174 100644
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ b/arch/ppc64/kernel/iSeries_htab.c
@@ -66,7 +66,7 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
}
if (slot < 0) { /* MSB set means secondary group */
- vflags |= HPTE_V_VALID;
+ vflags |= HPTE_V_SECONDARY;
secondary = 1;
slot &= 0x7fffffffffffffff;
}
diff --git a/arch/ppc64/kernel/mpic.c b/arch/ppc64/kernel/mpic.c
index cc262a05ddb..5f5bc73754d 100644
--- a/arch/ppc64/kernel/mpic.c
+++ b/arch/ppc64/kernel/mpic.c
@@ -506,8 +506,8 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
mpic->senses_count = senses_count;
/* Map the global registers */
- mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
- mpic->tmregs = mpic->gregs + (MPIC_TIMER_BASE >> 2);
+ mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x2000);
+ mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
BUG_ON(mpic->gregs == NULL);
/* Reset */
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 9939c206afa..b56c6a324e1 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -870,7 +870,7 @@ void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
rb = ((ra + b) - (x * divisor)) << 32;
y = (rb + c)/divisor;
- rc = ((rb + b) - (y * divisor)) << 32;
+ rc = ((rb + c) - (y * divisor)) << 32;
z = (rc + d)/divisor;
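The one-character time.c fix above restores the long-division invariant: the remainder carried into the next digit must come from the same partial dividend that the quotient digit y was computed from, i.e. (rb + c), not (rb + b). A minimal userspace sketch of one digit step of dividing a multi-digit base-2^32 number by a 32-bit divisor; the names are illustrative, not from the kernel code.

	#include <stdint.h>

	/* rem_in < divisor, so the shift below cannot overflow a uint64_t. */
	static uint32_t div_step(uint64_t rem_in, uint32_t digit, uint32_t divisor,
				 uint64_t *rem_out)
	{
		uint64_t part = (rem_in << 32) + digit;		/* partial dividend */
		uint32_t q = (uint32_t)(part / divisor);	/* next quotient digit */

		*rem_out = part - (uint64_t)q * divisor;	/* remainder of *this* part */
		return q;
	}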
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index c2157c9c3ac..be64b157afc 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -799,8 +799,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
- __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
- 0x300, local);
+ __hash_page(ea, 0, vsid, ptep, 0x300, local);
local_irq_restore(flags);
}
diff --git a/arch/sh/boards/renesas/rts7751r2d/mach.c b/arch/sh/boards/renesas/rts7751r2d/mach.c
index 1efc18e786d..610740512d5 100644
--- a/arch/sh/boards/renesas/rts7751r2d/mach.c
+++ b/arch/sh/boards/renesas/rts7751r2d/mach.c
@@ -23,7 +23,7 @@ extern void init_rts7751r2d_IRQ(void);
extern void *rts7751r2d_ioremap(unsigned long, unsigned long);
extern int rts7751r2d_irq_demux(int irq);
-extern void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, int);
+extern void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t);
extern int voyagergx_consistent_free(struct device *, size_t, void *, dma_addr_t);
/*
diff --git a/arch/sh/cchips/voyagergx/consistent.c b/arch/sh/cchips/voyagergx/consistent.c
index 5b92585a38d..3d9a02c093a 100644
--- a/arch/sh/cchips/voyagergx/consistent.c
+++ b/arch/sh/cchips/voyagergx/consistent.c
@@ -31,7 +31,7 @@ static LIST_HEAD(voya_alloc_list);
#define OHCI_SRAM_SIZE 0x10000
void *voyagergx_consistent_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, int flag)
+ dma_addr_t *handle, gfp_t flag)
{
struct list_head *list = &voya_alloc_list;
struct voya_alloc_entry *entry;
diff --git a/arch/sh/drivers/pci/dma-dreamcast.c b/arch/sh/drivers/pci/dma-dreamcast.c
index 83de7ef4e7d..e12418bb1fa 100644
--- a/arch/sh/drivers/pci/dma-dreamcast.c
+++ b/arch/sh/drivers/pci/dma-dreamcast.c
@@ -33,7 +33,7 @@
static int gapspci_dma_used = 0;
void *dreamcast_consistent_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
unsigned long buf;
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 1f7af0c73cf..df3a9e452cc 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -11,7 +11,7 @@
#include <linux/dma-mapping.h>
#include <asm/io.h>
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle)
+void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
{
struct page *page, *end, *free;
void *ret;
diff --git a/arch/sparc64/solaris/socksys.c b/arch/sparc64/solaris/socksys.c
index d7c1c76582c..fc6669e8dde 100644
--- a/arch/sparc64/solaris/socksys.c
+++ b/arch/sparc64/solaris/socksys.c
@@ -49,7 +49,7 @@ IPPROTO_EGP, IPPROTO_PUP, IPPROTO_UDP, IPPROTO_IDP, IPPROTO_RAW,
#else
-extern void * mykmalloc(size_t s, int gfp);
+extern void * mykmalloc(size_t s, gfp_t gfp);
extern void mykfree(void *);
#endif
diff --git a/arch/sparc64/solaris/timod.c b/arch/sparc64/solaris/timod.c
index aaad29c35c8..b84e5456b02 100644
--- a/arch/sparc64/solaris/timod.c
+++ b/arch/sparc64/solaris/timod.c
@@ -39,7 +39,7 @@ static char * page = NULL ;
#else
-void * mykmalloc(size_t s, int gfp)
+void * mykmalloc(size_t s, gfp_t gfp)
{
static char * page;
static size_t free;
diff --git a/arch/um/include/sysdep-i386/thread.h b/arch/um/include/sysdep-i386/thread.h
index e2bd6bae8b8..243fed44d78 100644
--- a/arch/um/include/sysdep-i386/thread.h
+++ b/arch/um/include/sysdep-i386/thread.h
@@ -4,7 +4,7 @@
#include <kern_constants.h>
#define TASK_DEBUGREGS(task) ((unsigned long *) &(((char *) (task))[HOST_TASK_DEBUGREGS]))
-#ifdef CONFIG_MODE_TT
+#ifdef UML_CONFIG_MODE_TT
#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[HOST_TASK_EXTERN_PID]))
#endif
diff --git a/arch/um/include/sysdep-x86_64/thread.h b/arch/um/include/sysdep-x86_64/thread.h
index 6a76a7f3683..cbef3e1697f 100644
--- a/arch/um/include/sysdep-x86_64/thread.h
+++ b/arch/um/include/sysdep-x86_64/thread.h
@@ -3,7 +3,7 @@
#include <kern_constants.h>
-#ifdef CONFIG_MODE_TT
+#ifdef UML_CONFIG_MODE_TT
#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[HOST_TASK_EXTERN_PID]))
#endif
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index ea008b031a8..462cc9d6538 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -252,7 +252,7 @@ void paging_init(void)
#endif
}
-struct page *arch_validate(struct page *page, int mask, int order)
+struct page *arch_validate(struct page *page, gfp_t mask, int order)
{
unsigned long addr, zero = 0;
int i;
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index ea65db679e9..0d73ceeece7 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -80,7 +80,7 @@ void free_stack(unsigned long stack, int order)
unsigned long alloc_stack(int order, int atomic)
{
unsigned long page;
- int flags = GFP_KERNEL;
+ gfp_t flags = GFP_KERNEL;
if (atomic)
flags = GFP_ATOMIC;
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index cf0a0315d58..88be97c9698 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -187,7 +187,7 @@ static void flush_gart(struct device *dev)
/* Allocate DMA memory on node near device */
noinline
-static void *dma_alloc_pages(struct device *dev, unsigned gfp, unsigned order)
+static void *dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
struct page *page;
int node;
@@ -204,7 +204,7 @@ static void *dma_alloc_pages(struct device *dev, unsigned gfp, unsigned order)
*/
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- unsigned gfp)
+ gfp_t gfp)
{
void *memory;
unsigned long dma_mask = 0;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 67d90b89af0..5a981dca87f 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -24,7 +24,7 @@ EXPORT_SYMBOL(iommu_sac_force);
*/
void *dma_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, unsigned gfp)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
u64 mask;
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 84fde258cf8..1ff82268e8e 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -29,7 +29,7 @@
*/
void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
void *ret;
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 95c0a3690b0..4081c36c8c1 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -98,7 +98,6 @@ struct as_data {
struct as_rq *next_arq[2]; /* next in sort order */
sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
- struct list_head *dispatch; /* driver dispatch queue */
struct list_head *hash; /* request hash */
unsigned long exit_prob; /* probability a task will exit while
@@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void)
return ioc;
}
+static void as_put_io_context(struct as_rq *arq)
+{
+ struct as_io_context *aic;
+
+ if (unlikely(!arq->io_context))
+ return;
+
+ aic = arq->io_context->aic;
+
+ if (arq->is_sync == REQ_SYNC && aic) {
+ spin_lock(&aic->lock);
+ set_bit(AS_TASK_IORUNNING, &aic->state);
+ aic->last_end_request = jiffies;
+ spin_unlock(&aic->lock);
+ }
+
+ put_io_context(arq->io_context);
+}
+
/*
* the back merge hash support functions
*/
@@ -261,14 +279,6 @@ static inline void as_del_arq_hash(struct as_rq *arq)
__as_del_arq_hash(arq);
}
-static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
-{
- as_del_arq_hash(arq);
-
- if (q->last_merge == arq->request)
- q->last_merge = NULL;
-}
-
static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
{
struct request *rq = arq->request;
@@ -312,7 +322,7 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
BUG_ON(!arq->on_hash);
if (!rq_mergeable(__rq)) {
- as_remove_merge_hints(ad->q, arq);
+ as_del_arq_hash(arq);
continue;
}
@@ -950,23 +960,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
WARN_ON(!list_empty(&rq->queuelist));
- if (arq->state == AS_RQ_PRESCHED) {
- WARN_ON(arq->io_context);
- goto out;
- }
-
- if (arq->state == AS_RQ_MERGED)
- goto out_ioc;
-
if (arq->state != AS_RQ_REMOVED) {
printk("arq->state %d\n", arq->state);
WARN_ON(1);
goto out;
}
- if (!blk_fs_request(rq))
- goto out;
-
if (ad->changed_batch && ad->nr_dispatched == 1) {
kblockd_schedule_work(&ad->antic_work);
ad->changed_batch = 0;
@@ -1001,21 +1000,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
}
}
-out_ioc:
- if (!arq->io_context)
- goto out;
-
- if (arq->is_sync == REQ_SYNC) {
- struct as_io_context *aic = arq->io_context->aic;
- if (aic) {
- spin_lock(&aic->lock);
- set_bit(AS_TASK_IORUNNING, &aic->state);
- aic->last_end_request = jiffies;
- spin_unlock(&aic->lock);
- }
- }
-
- put_io_context(arq->io_context);
+ as_put_io_context(arq);
out:
arq->state = AS_RQ_POSTSCHED;
}
@@ -1047,73 +1032,11 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
list_del_init(&arq->fifo);
- as_remove_merge_hints(q, arq);
+ as_del_arq_hash(arq);
as_del_arq_rb(ad, arq);
}
/*
- * as_remove_dispatched_request is called to remove a request which has gone
- * to the dispatch list.
- */
-static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
-{
- struct as_rq *arq = RQ_DATA(rq);
- struct as_io_context *aic;
-
- if (!arq) {
- WARN_ON(1);
- return;
- }
-
- WARN_ON(arq->state != AS_RQ_DISPATCHED);
- WARN_ON(ON_RB(&arq->rb_node));
- if (arq->io_context && arq->io_context->aic) {
- aic = arq->io_context->aic;
- if (aic) {
- WARN_ON(!atomic_read(&aic->nr_dispatched));
- atomic_dec(&aic->nr_dispatched);
- }
- }
-}
-
-/*
- * as_remove_request is called when a driver has finished with a request.
- * This should be only called for dispatched requests, but for some reason
- * a POWER4 box running hwscan it does not.
- */
-static void as_remove_request(request_queue_t *q, struct request *rq)
-{
- struct as_rq *arq = RQ_DATA(rq);
-
- if (unlikely(arq->state == AS_RQ_NEW))
- goto out;
-
- if (ON_RB(&arq->rb_node)) {
- if (arq->state != AS_RQ_QUEUED) {
- printk("arq->state %d\n", arq->state);
- WARN_ON(1);
- goto out;
- }
- /*
- * We'll lose the aliased request(s) here. I don't think this
- * will ever happen, but if it does, hopefully someone will
- * report it.
- */
- WARN_ON(!list_empty(&rq->queuelist));
- as_remove_queued_request(q, rq);
- } else {
- if (arq->state != AS_RQ_DISPATCHED) {
- printk("arq->state %d\n", arq->state);
- WARN_ON(1);
- goto out;
- }
- as_remove_dispatched_request(q, rq);
- }
-out:
- arq->state = AS_RQ_REMOVED;
-}
-
-/*
* as_fifo_expired returns 0 if there are no expired reads on the fifo,
* 1 otherwise. It is ratelimited so that we only perform the check once per
* `fifo_expire' interval. Otherwise a large number of expired requests
@@ -1165,7 +1088,6 @@ static inline int as_batch_expired(struct as_data *ad)
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
{
struct request *rq = arq->request;
- struct list_head *insert;
const int data_dir = arq->is_sync;
BUG_ON(!ON_RB(&arq->rb_node));
@@ -1198,13 +1120,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
/*
* take it off the sort and fifo list, add to dispatch queue
*/
- insert = ad->dispatch->prev;
-
while (!list_empty(&rq->queuelist)) {
struct request *__rq = list_entry_rq(rq->queuelist.next);
struct as_rq *__arq = RQ_DATA(__rq);
- list_move_tail(&__rq->queuelist, ad->dispatch);
+ list_del(&__rq->queuelist);
+
+ elv_dispatch_add_tail(ad->q, __rq);
if (__arq->io_context && __arq->io_context->aic)
atomic_inc(&__arq->io_context->aic->nr_dispatched);
@@ -1218,7 +1140,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
as_remove_queued_request(ad->q, rq);
WARN_ON(arq->state != AS_RQ_QUEUED);
- list_add(&rq->queuelist, insert);
+ elv_dispatch_sort(ad->q, rq);
+
arq->state = AS_RQ_DISPATCHED;
if (arq->io_context && arq->io_context->aic)
atomic_inc(&arq->io_context->aic->nr_dispatched);
@@ -1230,12 +1153,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
* read/write expire, batch expire, etc, and moves it to the dispatch
* queue. Returns 1 if a request was found, 0 otherwise.
*/
-static int as_dispatch_request(struct as_data *ad)
+static int as_dispatch_request(request_queue_t *q, int force)
{
+ struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq;
const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
+ if (unlikely(force)) {
+ /*
+ * Forced dispatch, accounting is useless. Reset
+ * accounting states and dump fifo_lists. Note that
+ * batch_data_dir is reset to REQ_SYNC to avoid
+ * screwing write batch accounting as write batch
+ * accounting occurs on W->R transition.
+ */
+ int dispatched = 0;
+
+ ad->batch_data_dir = REQ_SYNC;
+ ad->changed_batch = 0;
+ ad->new_batch = 0;
+
+ while (ad->next_arq[REQ_SYNC]) {
+ as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+ dispatched++;
+ }
+ ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+ while (ad->next_arq[REQ_ASYNC]) {
+ as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+ dispatched++;
+ }
+ ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+ return dispatched;
+ }
+
/* Signal that the write batch was uncontended, so we can't time it */
if (ad->batch_data_dir == REQ_ASYNC && !reads) {
if (ad->current_write_count == 0 || !writes)
@@ -1359,20 +1312,6 @@ fifo_expired:
return 1;
}
-static struct request *as_next_request(request_queue_t *q)
-{
- struct as_data *ad = q->elevator->elevator_data;
- struct request *rq = NULL;
-
- /*
- * if there are still requests on the dispatch queue, grab the first
- */
- if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
- rq = list_entry_rq(ad->dispatch->next);
-
- return rq;
-}
-
/*
* Add arq to a list behind alias
*/
@@ -1404,17 +1343,25 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alia
/*
* Don't want to have to handle merges.
*/
- as_remove_merge_hints(ad->q, arq);
+ as_del_arq_hash(arq);
}
/*
* add arq to rbtree and fifo
*/
-static void as_add_request(struct as_data *ad, struct as_rq *arq)
+static void as_add_request(request_queue_t *q, struct request *rq)
{
+ struct as_data *ad = q->elevator->elevator_data;
+ struct as_rq *arq = RQ_DATA(rq);
struct as_rq *alias;
int data_dir;
+ if (arq->state != AS_RQ_PRESCHED) {
+ printk("arq->state: %d\n", arq->state);
+ WARN_ON(1);
+ }
+ arq->state = AS_RQ_NEW;
+
if (rq_data_dir(arq->request) == READ
|| current->flags&PF_SYNCWRITE)
arq->is_sync = 1;
@@ -1437,12 +1384,8 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
arq->expires = jiffies + ad->fifo_expire[data_dir];
list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
- if (rq_mergeable(arq->request)) {
+ if (rq_mergeable(arq->request))
as_add_arq_hash(ad, arq);
-
- if (!ad->q->last_merge)
- ad->q->last_merge = arq->request;
- }
as_update_arq(ad, arq); /* keep state machine up to date */
} else {
@@ -1463,96 +1406,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
arq->state = AS_RQ_QUEUED;
}
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(request_queue_t *q, struct request *rq)
{
- struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
- if (arq) {
- if (arq->state == AS_RQ_REMOVED) {
- arq->state = AS_RQ_DISPATCHED;
- if (arq->io_context && arq->io_context->aic)
- atomic_inc(&arq->io_context->aic->nr_dispatched);
- }
- } else
- WARN_ON(blk_fs_request(rq)
- && (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
-
- /* Stop anticipating - let this request get through */
- as_antic_stop(ad);
-}
-
-/*
- * requeue the request. The request has not been completed, nor is it a
- * new request, so don't touch accounting.
- */
-static void as_requeue_request(request_queue_t *q, struct request *rq)
-{
- as_deactivate_request(q, rq);
- list_add(&rq->queuelist, &q->queue_head);
-}
-
-/*
- * Account a request that is inserted directly onto the dispatch queue.
- * arq->io_context->aic->nr_dispatched should not need to be incremented
- * because only new requests should come through here: requeues go through
- * our explicit requeue handler.
- */
-static void as_account_queued_request(struct as_data *ad, struct request *rq)
-{
- if (blk_fs_request(rq)) {
- struct as_rq *arq = RQ_DATA(rq);
- arq->state = AS_RQ_DISPATCHED;
- ad->nr_dispatched++;
- }
+ WARN_ON(arq->state != AS_RQ_DISPATCHED);
+ arq->state = AS_RQ_REMOVED;
+ if (arq->io_context && arq->io_context->aic)
+ atomic_dec(&arq->io_context->aic->nr_dispatched);
}
-static void
-as_insert_request(request_queue_t *q, struct request *rq, int where)
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
{
- struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
- if (arq) {
- if (arq->state != AS_RQ_PRESCHED) {
- printk("arq->state: %d\n", arq->state);
- WARN_ON(1);
- }
- arq->state = AS_RQ_NEW;
- }
-
- /* barriers must flush the reorder queue */
- if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
- && where == ELEVATOR_INSERT_SORT)) {
- WARN_ON(1);
- where = ELEVATOR_INSERT_BACK;
- }
-
- switch (where) {
- case ELEVATOR_INSERT_BACK:
- while (ad->next_arq[REQ_SYNC])
- as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-
- while (ad->next_arq[REQ_ASYNC])
- as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-
- list_add_tail(&rq->queuelist, ad->dispatch);
- as_account_queued_request(ad, rq);
- as_antic_stop(ad);
- break;
- case ELEVATOR_INSERT_FRONT:
- list_add(&rq->queuelist, ad->dispatch);
- as_account_queued_request(ad, rq);
- as_antic_stop(ad);
- break;
- case ELEVATOR_INSERT_SORT:
- BUG_ON(!blk_fs_request(rq));
- as_add_request(ad, arq);
- break;
- default:
- BUG();
- return;
- }
+ WARN_ON(arq->state != AS_RQ_REMOVED);
+ arq->state = AS_RQ_DISPATCHED;
+ if (arq->io_context && arq->io_context->aic)
+ atomic_inc(&arq->io_context->aic->nr_dispatched);
}
/*
@@ -1565,12 +1436,8 @@ static int as_queue_empty(request_queue_t *q)
{
struct as_data *ad = q->elevator->elevator_data;
- if (!list_empty(&ad->fifo_list[REQ_ASYNC])
- || !list_empty(&ad->fifo_list[REQ_SYNC])
- || !list_empty(ad->dispatch))
- return 0;
-
- return 1;
+ return list_empty(&ad->fifo_list[REQ_ASYNC])
+ && list_empty(&ad->fifo_list[REQ_SYNC]);
}
static struct request *
@@ -1608,15 +1475,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
int ret;
/*
- * try last_merge to avoid going to hash
- */
- ret = elv_try_last_merge(q, bio);
- if (ret != ELEVATOR_NO_MERGE) {
- __rq = q->last_merge;
- goto out_insert;
- }
-
- /*
* see if the merge hash can satisfy a back merge
*/
__rq = as_find_arq_hash(ad, bio->bi_sector);
@@ -1644,9 +1502,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
out:
- if (rq_mergeable(__rq))
- q->last_merge = __rq;
-out_insert:
if (ret) {
if (rq_mergeable(__rq))
as_hot_arq_hash(ad, RQ_DATA(__rq));
@@ -1693,9 +1548,6 @@ static void as_merged_request(request_queue_t *q, struct request *req)
* behind the disk head. We currently don't bother adjusting.
*/
}
-
- if (arq->on_hash)
- q->last_merge = req;
}
static void
@@ -1763,6 +1615,7 @@ as_merged_requests(request_queue_t *q, struct request *req,
* kill knowledge of next, this one is a goner
*/
as_remove_queued_request(q, next);
+ as_put_io_context(anext);
anext->state = AS_RQ_MERGED;
}
@@ -1782,7 +1635,7 @@ static void as_work_handler(void *data)
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- if (as_next_request(q))
+ if (!as_queue_empty(q))
q->request_fn(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -1797,7 +1650,9 @@ static void as_put_request(request_queue_t *q, struct request *rq)
return;
}
- if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
+ if (unlikely(arq->state != AS_RQ_POSTSCHED &&
+ arq->state != AS_RQ_PRESCHED &&
+ arq->state != AS_RQ_MERGED)) {
printk("arq->state %d\n", arq->state);
WARN_ON(1);
}
@@ -1807,7 +1662,7 @@ static void as_put_request(request_queue_t *q, struct request *rq)
}
static int as_set_request(request_queue_t *q, struct request *rq,
- struct bio *bio, int gfp_mask)
+ struct bio *bio, gfp_t gfp_mask)
{
struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
@@ -1907,7 +1762,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
ad->sort_list[REQ_SYNC] = RB_ROOT;
ad->sort_list[REQ_ASYNC] = RB_ROOT;
- ad->dispatch = &q->queue_head;
ad->fifo_expire[REQ_SYNC] = default_read_expire;
ad->fifo_expire[REQ_ASYNC] = default_write_expire;
ad->antic_expire = default_antic_expire;
@@ -2072,10 +1926,9 @@ static struct elevator_type iosched_as = {
.elevator_merge_fn = as_merge,
.elevator_merged_fn = as_merged_request,
.elevator_merge_req_fn = as_merged_requests,
- .elevator_next_req_fn = as_next_request,
- .elevator_add_req_fn = as_insert_request,
- .elevator_remove_req_fn = as_remove_request,
- .elevator_requeue_req_fn = as_requeue_request,
+ .elevator_dispatch_fn = as_dispatch_request,
+ .elevator_add_req_fn = as_add_request,
+ .elevator_activate_req_fn = as_activate_request,
.elevator_deactivate_req_fn = as_deactivate_request,
.elevator_queue_empty_fn = as_queue_empty,
.elevator_completed_req_fn = as_completed_request,
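The as-iosched conversion above follows the reworked elevator API in this merge: the scheduler no longer keeps a private dispatch list, and its elevator_dispatch_fn hands requests to the block layer via elv_dispatch_sort() or elv_dispatch_add_tail(), draining everything when the force flag is set. A minimal sketch of that contract for a hypothetical FIFO scheduler (noop-like; the names are made up, not from the patch):

	#include <linux/blkdev.h>
	#include <linux/elevator.h>
	#include <linux/list.h>

	struct fifo_data {
		struct list_head queue;		/* requests in arrival order */
	};

	static int fifo_dispatch(request_queue_t *q, int force)
	{
		struct fifo_data *fd = q->elevator->elevator_data;
		int dispatched = 0;

		while (!list_empty(&fd->queue)) {
			struct request *rq = list_entry(fd->queue.next,
							struct request, queuelist);

			list_del_init(&rq->queuelist);
			elv_dispatch_sort(q, rq);	/* insert into q->queue_head */
			dispatched++;
			if (!force)			/* normal path: one at a time */
				break;
		}
		return dispatched;
	}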
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index cd056e7e64e..94690e4d41e 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -84,7 +84,6 @@ static int cfq_max_depth = 2;
(node)->rb_left = NULL; \
} while (0)
#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
-#define ON_RB(node) ((node)->rb_color != RB_NONE)
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq) (rq)->sector
@@ -271,10 +270,7 @@ CFQ_CFQQ_FNS(expired);
#undef CFQ_CFQQ_FNS
enum cfq_rq_state_flags {
- CFQ_CRQ_FLAG_in_flight = 0,
- CFQ_CRQ_FLAG_in_driver,
- CFQ_CRQ_FLAG_is_sync,
- CFQ_CRQ_FLAG_requeued,
+ CFQ_CRQ_FLAG_is_sync = 0,
};
#define CFQ_CRQ_FNS(name) \
@@ -291,14 +287,11 @@ static inline int cfq_crq_##name(const struct cfq_rq *crq) \
return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \
}
-CFQ_CRQ_FNS(in_flight);
-CFQ_CRQ_FNS(in_driver);
CFQ_CRQ_FNS(is_sync);
-CFQ_CRQ_FNS(requeued);
#undef CFQ_CRQ_FNS
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
-static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
+static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
static void cfq_put_cfqd(struct cfq_data *cfqd);
#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
@@ -311,14 +304,6 @@ static inline void cfq_del_crq_hash(struct cfq_rq *crq)
hlist_del_init(&crq->hash);
}
-static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
-{
- cfq_del_crq_hash(crq);
-
- if (q->last_merge == crq->request)
- q->last_merge = NULL;
-}
-
static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
@@ -347,18 +332,13 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
return NULL;
}
-static inline int cfq_pending_requests(struct cfq_data *cfqd)
-{
- return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
-}
-
/*
* scheduler run of queue, if there are requests pending and no one in the
* driver that will restart queueing
*/
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
- if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+ if (!cfqd->rq_in_driver && cfqd->busy_queues)
kblockd_schedule_work(&cfqd->unplug_work);
}
@@ -366,7 +346,7 @@ static int cfq_queue_empty(request_queue_t *q)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- return !cfq_pending_requests(cfqd);
+ return !cfqd->busy_queues;
}
/*
@@ -386,11 +366,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
if (crq2 == NULL)
return crq1;
- if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2))
- return crq1;
- else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1))
- return crq2;
-
if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
return crq1;
else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
@@ -461,10 +436,7 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
struct rb_node *rbnext, *rbprev;
- rbnext = NULL;
- if (ON_RB(&last->rb_node))
- rbnext = rb_next(&last->rb_node);
- if (!rbnext) {
+ if (!(rbnext = rb_next(&last->rb_node))) {
rbnext = rb_first(&cfqq->sort_list);
if (rbnext == &last->rb_node)
rbnext = NULL;
@@ -545,13 +517,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
* the pending list according to last request service
*/
static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
- cfq_resort_rr_list(cfqq, requeue);
+ cfq_resort_rr_list(cfqq, 0);
}
static inline void
@@ -571,22 +543,19 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
static inline void cfq_del_crq_rb(struct cfq_rq *crq)
{
struct cfq_queue *cfqq = crq->cfq_queue;
+ struct cfq_data *cfqd = cfqq->cfqd;
+ const int sync = cfq_crq_is_sync(crq);
- if (ON_RB(&crq->rb_node)) {
- struct cfq_data *cfqd = cfqq->cfqd;
- const int sync = cfq_crq_is_sync(crq);
+ BUG_ON(!cfqq->queued[sync]);
+ cfqq->queued[sync]--;
- BUG_ON(!cfqq->queued[sync]);
- cfqq->queued[sync]--;
+ cfq_update_next_crq(crq);
- cfq_update_next_crq(crq);
+ rb_erase(&crq->rb_node, &cfqq->sort_list);
+ RB_CLEAR_COLOR(&crq->rb_node);
- rb_erase(&crq->rb_node, &cfqq->sort_list);
- RB_CLEAR_COLOR(&crq->rb_node);
-
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
- cfq_del_cfqq_rr(cfqd, cfqq);
- }
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+ cfq_del_cfqq_rr(cfqd, cfqq);
}
static struct cfq_rq *
@@ -627,12 +596,12 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
* if that happens, put the alias on the dispatch list
*/
while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
- cfq_dispatch_sort(cfqd->queue, __alias);
+ cfq_dispatch_insert(cfqd->queue, __alias);
rb_insert_color(&crq->rb_node, &cfqq->sort_list);
if (!cfq_cfqq_on_rr(cfqq))
- cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));
+ cfq_add_cfqq_rr(cfqd, cfqq);
/*
* check if this request is a better next-serve candidate
@@ -643,10 +612,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
static inline void
cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
- if (ON_RB(&crq->rb_node)) {
- rb_erase(&crq->rb_node, &cfqq->sort_list);
- cfqq->queued[cfq_crq_is_sync(crq)]--;
- }
+ rb_erase(&crq->rb_node, &cfqq->sort_list);
+ cfqq->queued[cfq_crq_is_sync(crq)]--;
cfq_add_crq_rb(crq);
}
@@ -676,49 +643,28 @@ out:
return NULL;
}
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_rq *crq = RQ_DATA(rq);
-
- if (crq) {
- struct cfq_queue *cfqq = crq->cfq_queue;
-
- if (cfq_crq_in_driver(crq)) {
- cfq_clear_crq_in_driver(crq);
- WARN_ON(!cfqd->rq_in_driver);
- cfqd->rq_in_driver--;
- }
- if (cfq_crq_in_flight(crq)) {
- const int sync = cfq_crq_is_sync(crq);
- cfq_clear_crq_in_flight(crq);
- WARN_ON(!cfqq->on_dispatch[sync]);
- cfqq->on_dispatch[sync]--;
- }
- cfq_mark_crq_requeued(crq);
- }
+ cfqd->rq_in_driver++;
}
-/*
- * make sure the service time gets corrected on reissue of this request
- */
-static void cfq_requeue_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
- cfq_deactivate_request(q, rq);
- list_add(&rq->queuelist, &q->queue_head);
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ WARN_ON(!cfqd->rq_in_driver);
+ cfqd->rq_in_driver--;
}
-static void cfq_remove_request(request_queue_t *q, struct request *rq)
+static void cfq_remove_request(struct request *rq)
{
struct cfq_rq *crq = RQ_DATA(rq);
- if (crq) {
- list_del_init(&rq->queuelist);
- cfq_del_crq_rb(crq);
- cfq_remove_merge_hints(q, crq);
-
- }
+ list_del_init(&rq->queuelist);
+ cfq_del_crq_rb(crq);
+ cfq_del_crq_hash(crq);
}
static int
@@ -728,12 +674,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
struct request *__rq;
int ret;
- ret = elv_try_last_merge(q, bio);
- if (ret != ELEVATOR_NO_MERGE) {
- __rq = q->last_merge;
- goto out_insert;
- }
-
__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_BACK_MERGE;
@@ -748,8 +688,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
out:
- q->last_merge = __rq;
-out_insert:
*req = __rq;
return ret;
}
@@ -762,14 +700,12 @@ static void cfq_merged_request(request_queue_t *q, struct request *req)
cfq_del_crq_hash(crq);
cfq_add_crq_hash(cfqd, crq);
- if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+ if (rq_rb_key(req) != crq->rb_key) {
struct cfq_queue *cfqq = crq->cfq_queue;
cfq_update_next_crq(crq);
cfq_reposition_crq_rb(cfqq, crq);
}
-
- q->last_merge = req;
}
static void
@@ -785,7 +721,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
time_before(next->start_time, rq->start_time))
list_move(&rq->queuelist, &next->queuelist);
- cfq_remove_request(q, next);
+ cfq_remove_request(next);
}
static inline void
@@ -992,53 +928,15 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return 1;
}
-/*
- * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
- * this function sector sorts the selected request to minimize seeks. we start
- * at cfqd->last_sector, not 0.
- */
-static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = crq->cfq_queue;
- struct list_head *head = &q->queue_head, *entry = head;
- struct request *__rq;
- sector_t last;
-
- list_del(&crq->request->queuelist);
-
- last = cfqd->last_sector;
- list_for_each_entry_reverse(__rq, head, queuelist) {
- struct cfq_rq *__crq = RQ_DATA(__rq);
-
- if (blk_barrier_rq(__rq))
- break;
- if (!blk_fs_request(__rq))
- break;
- if (cfq_crq_requeued(__crq))
- break;
-
- if (__rq->sector <= crq->request->sector)
- break;
- if (__rq->sector > last && crq->request->sector < last) {
- last = crq->request->sector + crq->request->nr_sectors;
- break;
- }
- entry = &__rq->queuelist;
- }
-
- cfqd->last_sector = last;
cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
-
- cfq_del_crq_rb(crq);
- cfq_remove_merge_hints(q, crq);
-
- cfq_mark_crq_in_flight(crq);
- cfq_clear_crq_requeued(crq);
-
+ cfq_remove_request(crq->request);
cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
- list_add_tail(&crq->request->queuelist, entry);
+ elv_dispatch_sort(q, crq->request);
}
/*
@@ -1159,7 +1057,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
/*
* finally, insert request into driver dispatch list
*/
- cfq_dispatch_sort(cfqd->queue, crq);
+ cfq_dispatch_insert(cfqd->queue, crq);
cfqd->dispatch_slice++;
dispatched++;
@@ -1194,7 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
static int
-cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
+cfq_dispatch_requests(request_queue_t *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
@@ -1204,12 +1102,25 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
cfqq = cfq_select_queue(cfqd, force);
if (cfqq) {
+ int max_dispatch;
+
+ /*
+ * if idle window is disabled, allow queue buildup
+ */
+ if (!cfq_cfqq_idle_window(cfqq) &&
+ cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+ return 0;
+
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
del_timer(&cfqd->idle_slice_timer);
- if (cfq_class_idle(cfqq))
- max_dispatch = 1;
+ if (!force) {
+ max_dispatch = cfqd->cfq_quantum;
+ if (cfq_class_idle(cfqq))
+ max_dispatch = 1;
+ } else
+ max_dispatch = INT_MAX;
return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
}
@@ -1217,93 +1128,6 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
return 0;
}
-static inline void cfq_account_dispatch(struct cfq_rq *crq)
-{
- struct cfq_queue *cfqq = crq->cfq_queue;
- struct cfq_data *cfqd = cfqq->cfqd;
-
- if (unlikely(!blk_fs_request(crq->request)))
- return;
-
- /*
- * accounted bit is necessary since some drivers will call
- * elv_next_request() many times for the same request (eg ide)
- */
- if (cfq_crq_in_driver(crq))
- return;
-
- cfq_mark_crq_in_driver(crq);
- cfqd->rq_in_driver++;
-}
-
-static inline void
-cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
- struct cfq_data *cfqd = cfqq->cfqd;
- unsigned long now;
-
- if (!cfq_crq_in_driver(crq))
- return;
-
- now = jiffies;
-
- WARN_ON(!cfqd->rq_in_driver);
- cfqd->rq_in_driver--;
-
- if (!cfq_class_idle(cfqq))
- cfqd->last_end_request = now;
-
- if (!cfq_cfqq_dispatched(cfqq)) {
- if (cfq_cfqq_on_rr(cfqq)) {
- cfqq->service_last = now;
- cfq_resort_rr_list(cfqq, 0);
- }
- if (cfq_cfqq_expired(cfqq)) {
- __cfq_slice_expired(cfqd, cfqq, 0);
- cfq_schedule_dispatch(cfqd);
- }
- }
-
- if (cfq_crq_is_sync(crq))
- crq->io_context->last_end_request = now;
-}
-
-static struct request *cfq_next_request(request_queue_t *q)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct request *rq;
-
- if (!list_empty(&q->queue_head)) {
- struct cfq_rq *crq;
-dispatch:
- rq = list_entry_rq(q->queue_head.next);
-
- crq = RQ_DATA(rq);
- if (crq) {
- struct cfq_queue *cfqq = crq->cfq_queue;
-
- /*
- * if idle window is disabled, allow queue buildup
- */
- if (!cfq_crq_in_driver(crq) &&
- !cfq_cfqq_idle_window(cfqq) &&
- !blk_barrier_rq(rq) &&
- cfqd->rq_in_driver >= cfqd->cfq_max_depth)
- return NULL;
-
- cfq_remove_merge_hints(q, crq);
- cfq_account_dispatch(crq);
- }
-
- return rq;
- }
-
- if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
- goto dispatch;
-
- return NULL;
-}
-
/*
* task holds one reference to the queue, dropped when task exits. each crq
* in-flight on this queue also holds a reference, dropped when crq is freed.
@@ -1422,7 +1246,7 @@ static void cfq_exit_io_context(struct cfq_io_context *cic)
}
static struct cfq_io_context *
-cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
+cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
@@ -1517,7 +1341,7 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
- int gfp_mask)
+ gfp_t gfp_mask)
{
const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1578,7 +1402,7 @@ out:
* cfqq, so we don't need to worry about it disappearing
*/
static struct cfq_io_context *
-cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
+cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
{
struct io_context *ioc = NULL;
struct cfq_io_context *cic;
@@ -1816,8 +1640,9 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
}
-static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
+static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_rq *crq = RQ_DATA(rq);
struct cfq_queue *cfqq = crq->cfq_queue;
@@ -1827,66 +1652,43 @@ static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
list_add_tail(&rq->queuelist, &cfqq->fifo);
- if (rq_mergeable(rq)) {
+ if (rq_mergeable(rq))
cfq_add_crq_hash(cfqd, crq);
- if (!cfqd->queue->last_merge)
- cfqd->queue->last_merge = rq;
- }
-
cfq_crq_enqueued(cfqd, cfqq, crq);
}
-static void
-cfq_insert_request(request_queue_t *q, struct request *rq, int where)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- switch (where) {
- case ELEVATOR_INSERT_BACK:
- while (cfq_dispatch_requests(q, INT_MAX, 1))
- ;
- list_add_tail(&rq->queuelist, &q->queue_head);
- /*
- * If we were idling with pending requests on
- * inactive cfqqs, force dispatching will
- * remove the idle timer and the queue won't
- * be kicked by __make_request() afterward.
- * Kick it here.
- */
- cfq_schedule_dispatch(cfqd);
- break;
- case ELEVATOR_INSERT_FRONT:
- list_add(&rq->queuelist, &q->queue_head);
- break;
- case ELEVATOR_INSERT_SORT:
- BUG_ON(!blk_fs_request(rq));
- cfq_enqueue(cfqd, rq);
- break;
- default:
- printk("%s: bad insert point %d\n", __FUNCTION__,where);
- return;
- }
-}
-
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
struct cfq_rq *crq = RQ_DATA(rq);
- struct cfq_queue *cfqq;
+ struct cfq_queue *cfqq = crq->cfq_queue;
+ struct cfq_data *cfqd = cfqq->cfqd;
+ const int sync = cfq_crq_is_sync(crq);
+ unsigned long now;
- if (unlikely(!blk_fs_request(rq)))
- return;
+ now = jiffies;
- cfqq = crq->cfq_queue;
+ WARN_ON(!cfqd->rq_in_driver);
+ WARN_ON(!cfqq->on_dispatch[sync]);
+ cfqd->rq_in_driver--;
+ cfqq->on_dispatch[sync]--;
- if (cfq_crq_in_flight(crq)) {
- const int sync = cfq_crq_is_sync(crq);
+ if (!cfq_class_idle(cfqq))
+ cfqd->last_end_request = now;
- WARN_ON(!cfqq->on_dispatch[sync]);
- cfqq->on_dispatch[sync]--;
+ if (!cfq_cfqq_dispatched(cfqq)) {
+ if (cfq_cfqq_on_rr(cfqq)) {
+ cfqq->service_last = now;
+ cfq_resort_rr_list(cfqq, 0);
+ }
+ if (cfq_cfqq_expired(cfqq)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
}
- cfq_account_completion(cfqq, crq);
+ if (cfq_crq_is_sync(crq))
+ crq->io_context->last_end_request = now;
}
static struct request *
@@ -2075,7 +1877,7 @@ static void cfq_put_request(request_queue_t *q, struct request *rq)
*/
static int
cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
- int gfp_mask)
+ gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -2118,9 +1920,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
INIT_HLIST_NODE(&crq->hash);
crq->cfq_queue = cfqq;
crq->io_context = cic;
- cfq_clear_crq_in_flight(crq);
- cfq_clear_crq_in_driver(crq);
- cfq_clear_crq_requeued(crq);
if (rw == READ || process_sync(tsk))
cfq_mark_crq_is_sync(crq);
@@ -2201,7 +2000,7 @@ static void cfq_idle_slice_timer(unsigned long data)
* only expire and reinvoke request handler, if there are
* other queues with pending requests
*/
- if (!cfq_pending_requests(cfqd)) {
+ if (!cfqd->busy_queues) {
cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
add_timer(&cfqd->idle_slice_timer);
goto out_cont;
@@ -2576,10 +2375,9 @@ static struct elevator_type iosched_cfq = {
.elevator_merge_fn = cfq_merge,
.elevator_merged_fn = cfq_merged_request,
.elevator_merge_req_fn = cfq_merged_requests,
- .elevator_next_req_fn = cfq_next_request,
+ .elevator_dispatch_fn = cfq_dispatch_requests,
.elevator_add_req_fn = cfq_insert_request,
- .elevator_remove_req_fn = cfq_remove_request,
- .elevator_requeue_req_fn = cfq_requeue_request,
+ .elevator_activate_req_fn = cfq_activate_request,
.elevator_deactivate_req_fn = cfq_deactivate_request,
.elevator_queue_empty_fn = cfq_queue_empty,
.elevator_completed_req_fn = cfq_completed_request,
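
The cfq changes above follow the new dispatch contract this series introduces in elevator.c further down: a scheduler no longer hands requests back one at a time through elevator_next_req_fn; instead elevator_add_req_fn queues them internally and elevator_dispatch_fn moves them onto the core dispatch list (q->queue_head) with elv_dispatch_sort() or elv_dispatch_add_tail(), returning non-zero when it made progress. A minimal, hypothetical scheduler written against that contract (not part of this patch; the fifo_* names are illustrative only) might look like:

struct fifo_data {
        struct list_head queue;         /* requests the scheduler still holds */
};

static void fifo_add_request(request_queue_t *q, struct request *rq)
{
        struct fifo_data *fd = q->elevator->elevator_data;

        list_add_tail(&rq->queuelist, &fd->queue);
}

static int fifo_dispatch(request_queue_t *q, int force)
{
        struct fifo_data *fd = q->elevator->elevator_data;
        struct request *rq;

        if (list_empty(&fd->queue))
                return 0;

        /* move one request to the core dispatch list and report progress */
        rq = list_entry_rq(fd->queue.next);
        list_del_init(&rq->queuelist);
        elv_dispatch_add_tail(q, rq);
        return 1;
}

The core drains such a scheduler with while (q->elevator->ops->elevator_dispatch_fn(q, 1)) ;, as __elv_add_request() does for back-inserted barriers in the elevator.c hunks below.
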
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index 52a3ae5289a..7929471d7df 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -50,7 +50,6 @@ struct deadline_data {
* next in sort order. read, write or both are NULL
*/
struct deadline_rq *next_drq[2];
- struct list_head *dispatch; /* driver dispatch queue */
struct list_head *hash; /* request hash */
unsigned int batching; /* number of sequential requests made */
sector_t last_sector; /* head position */
@@ -113,15 +112,6 @@ static inline void deadline_del_drq_hash(struct deadline_rq *drq)
__deadline_del_drq_hash(drq);
}
-static void
-deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
-{
- deadline_del_drq_hash(drq);
-
- if (q->last_merge == drq->request)
- q->last_merge = NULL;
-}
-
static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
@@ -239,10 +229,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
dd->next_drq[data_dir] = rb_entry_drq(rbnext);
}
- if (ON_RB(&drq->rb_node)) {
- rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
- RB_CLEAR(&drq->rb_node);
- }
+ BUG_ON(!ON_RB(&drq->rb_node));
+ rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+ RB_CLEAR(&drq->rb_node);
}
static struct request *
@@ -286,7 +275,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir)
/*
* add drq to rbtree and fifo
*/
-static inline void
+static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
struct deadline_data *dd = q->elevator->elevator_data;
@@ -301,12 +290,8 @@ deadline_add_request(struct request_queue *q, struct request *rq)
drq->expires = jiffies + dd->fifo_expire[data_dir];
list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
- if (rq_mergeable(rq)) {
+ if (rq_mergeable(rq))
deadline_add_drq_hash(dd, drq);
-
- if (!q->last_merge)
- q->last_merge = rq;
- }
}
/*
@@ -315,14 +300,11 @@ deadline_add_request(struct request_queue *q, struct request *rq)
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
struct deadline_rq *drq = RQ_DATA(rq);
+ struct deadline_data *dd = q->elevator->elevator_data;
- if (drq) {
- struct deadline_data *dd = q->elevator->elevator_data;
-
- list_del_init(&drq->fifo);
- deadline_remove_merge_hints(q, drq);
- deadline_del_drq_rb(dd, drq);
- }
+ list_del_init(&drq->fifo);
+ deadline_del_drq_rb(dd, drq);
+ deadline_del_drq_hash(drq);
}
static int
@@ -333,15 +315,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
int ret;
/*
- * try last_merge to avoid going to hash
- */
- ret = elv_try_last_merge(q, bio);
- if (ret != ELEVATOR_NO_MERGE) {
- __rq = q->last_merge;
- goto out_insert;
- }
-
- /*
* see if the merge hash can satisfy a back merge
*/
__rq = deadline_find_drq_hash(dd, bio->bi_sector);
@@ -373,8 +346,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
out:
- q->last_merge = __rq;
-out_insert:
if (ret)
deadline_hot_drq_hash(dd, RQ_DATA(__rq));
*req = __rq;
@@ -399,8 +370,6 @@ static void deadline_merged_request(request_queue_t *q, struct request *req)
deadline_del_drq_rb(dd, drq);
deadline_add_drq_rb(dd, drq);
}
-
- q->last_merge = req;
}
static void
@@ -452,7 +421,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
request_queue_t *q = drq->request->q;
deadline_remove_request(q, drq->request);
- list_add_tail(&drq->request->queuelist, dd->dispatch);
+ elv_dispatch_add_tail(q, drq->request);
}
/*
@@ -502,8 +471,9 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
-static int deadline_dispatch_requests(struct deadline_data *dd)
+static int deadline_dispatch_requests(request_queue_t *q, int force)
{
+ struct deadline_data *dd = q->elevator->elevator_data;
const int reads = !list_empty(&dd->fifo_list[READ]);
const int writes = !list_empty(&dd->fifo_list[WRITE]);
struct deadline_rq *drq;
@@ -597,65 +567,12 @@ dispatch_request:
return 1;
}
-static struct request *deadline_next_request(request_queue_t *q)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- struct request *rq;
-
- /*
- * if there are still requests on the dispatch queue, grab the first one
- */
- if (!list_empty(dd->dispatch)) {
-dispatch:
- rq = list_entry_rq(dd->dispatch->next);
- return rq;
- }
-
- if (deadline_dispatch_requests(dd))
- goto dispatch;
-
- return NULL;
-}
-
-static void
-deadline_insert_request(request_queue_t *q, struct request *rq, int where)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
-
- /* barriers must flush the reorder queue */
- if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
- && where == ELEVATOR_INSERT_SORT))
- where = ELEVATOR_INSERT_BACK;
-
- switch (where) {
- case ELEVATOR_INSERT_BACK:
- while (deadline_dispatch_requests(dd))
- ;
- list_add_tail(&rq->queuelist, dd->dispatch);
- break;
- case ELEVATOR_INSERT_FRONT:
- list_add(&rq->queuelist, dd->dispatch);
- break;
- case ELEVATOR_INSERT_SORT:
- BUG_ON(!blk_fs_request(rq));
- deadline_add_request(q, rq);
- break;
- default:
- printk("%s: bad insert point %d\n", __FUNCTION__,where);
- return;
- }
-}
-
static int deadline_queue_empty(request_queue_t *q)
{
struct deadline_data *dd = q->elevator->elevator_data;
- if (!list_empty(&dd->fifo_list[WRITE])
- || !list_empty(&dd->fifo_list[READ])
- || !list_empty(dd->dispatch))
- return 0;
-
- return 1;
+ return list_empty(&dd->fifo_list[WRITE])
+ && list_empty(&dd->fifo_list[READ]);
}
static struct request *
@@ -733,7 +650,6 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
dd->sort_list[READ] = RB_ROOT;
dd->sort_list[WRITE] = RB_ROOT;
- dd->dispatch = &q->queue_head;
dd->fifo_expire[READ] = read_expire;
dd->fifo_expire[WRITE] = write_expire;
dd->writes_starved = writes_starved;
@@ -748,15 +664,13 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq = RQ_DATA(rq);
- if (drq) {
- mempool_free(drq, dd->drq_pool);
- rq->elevator_private = NULL;
- }
+ mempool_free(drq, dd->drq_pool);
+ rq->elevator_private = NULL;
}
static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
- int gfp_mask)
+ gfp_t gfp_mask)
{
struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq;
@@ -917,9 +831,8 @@ static struct elevator_type iosched_deadline = {
.elevator_merge_fn = deadline_merge,
.elevator_merged_fn = deadline_merged_request,
.elevator_merge_req_fn = deadline_merged_requests,
- .elevator_next_req_fn = deadline_next_request,
- .elevator_add_req_fn = deadline_insert_request,
- .elevator_remove_req_fn = deadline_remove_request,
+ .elevator_dispatch_fn = deadline_dispatch_requests,
+ .elevator_add_req_fn = deadline_add_request,
.elevator_queue_empty_fn = deadline_queue_empty,
.elevator_former_req_fn = deadline_former_request,
.elevator_latter_req_fn = deadline_latter_request,
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 98f0126a2de..55621d5c577 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
@@ -83,21 +84,11 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
}
EXPORT_SYMBOL(elv_try_merge);
-inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
-{
- if (q->last_merge)
- return elv_try_merge(q->last_merge, bio);
-
- return ELEVATOR_NO_MERGE;
-}
-EXPORT_SYMBOL(elv_try_last_merge);
-
static struct elevator_type *elevator_find(const char *name)
{
struct elevator_type *e = NULL;
struct list_head *entry;
- spin_lock_irq(&elv_list_lock);
list_for_each(entry, &elv_list) {
struct elevator_type *__e;
@@ -108,7 +99,6 @@ static struct elevator_type *elevator_find(const char *name)
break;
}
}
- spin_unlock_irq(&elv_list_lock);
return e;
}
@@ -120,12 +110,15 @@ static void elevator_put(struct elevator_type *e)
static struct elevator_type *elevator_get(const char *name)
{
- struct elevator_type *e = elevator_find(name);
+ struct elevator_type *e;
- if (!e)
- return NULL;
- if (!try_module_get(e->elevator_owner))
- return NULL;
+ spin_lock_irq(&elv_list_lock);
+
+ e = elevator_find(name);
+ if (e && !try_module_get(e->elevator_owner))
+ e = NULL;
+
+ spin_unlock_irq(&elv_list_lock);
return e;
}
@@ -139,8 +132,6 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
eq->ops = &e->ops;
eq->elevator_type = e;
- INIT_LIST_HEAD(&q->queue_head);
- q->last_merge = NULL;
q->elevator = eq;
if (eq->ops->elevator_init_fn)
@@ -153,11 +144,15 @@ static char chosen_elevator[16];
static void elevator_setup_default(void)
{
+ struct elevator_type *e;
+
/*
* check if default is set and exists
*/
- if (chosen_elevator[0] && elevator_find(chosen_elevator))
+ if (chosen_elevator[0] && (e = elevator_get(chosen_elevator))) {
+ elevator_put(e);
return;
+ }
#if defined(CONFIG_IOSCHED_AS)
strcpy(chosen_elevator, "anticipatory");
@@ -186,6 +181,11 @@ int elevator_init(request_queue_t *q, char *name)
struct elevator_queue *eq;
int ret = 0;
+ INIT_LIST_HEAD(&q->queue_head);
+ q->last_merge = NULL;
+ q->end_sector = 0;
+ q->boundary_rq = NULL;
+
elevator_setup_default();
if (!name)
@@ -220,9 +220,52 @@ void elevator_exit(elevator_t *e)
kfree(e);
}
+/*
+ * Insert rq into dispatch queue of q. Queue lock must be held on
+ * entry. rq is sort-inserted relative to the current dispatch
+ * boundary (q->end_sector). To be used by specific elevators.
+ */
+void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+{
+ sector_t boundary;
+ struct list_head *entry;
+
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
+
+ boundary = q->end_sector;
+
+ list_for_each_prev(entry, &q->queue_head) {
+ struct request *pos = list_entry_rq(entry);
+
+ if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+ break;
+ if (rq->sector >= boundary) {
+ if (pos->sector < boundary)
+ continue;
+ } else {
+ if (pos->sector >= boundary)
+ break;
+ }
+ if (rq->sector >= pos->sector)
+ break;
+ }
+
+ list_add(&rq->queuelist, entry);
+}
+
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
+ int ret;
+
+ if (q->last_merge) {
+ ret = elv_try_merge(q->last_merge, bio);
+ if (ret != ELEVATOR_NO_MERGE) {
+ *req = q->last_merge;
+ return ret;
+ }
+ }
if (e->ops->elevator_merge_fn)
return e->ops->elevator_merge_fn(q, req, bio);
@@ -236,6 +279,8 @@ void elv_merged_request(request_queue_t *q, struct request *rq)
if (e->ops->elevator_merged_fn)
e->ops->elevator_merged_fn(q, rq);
+
+ q->last_merge = rq;
}
void elv_merge_requests(request_queue_t *q, struct request *rq,
@@ -243,20 +288,13 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
{
elevator_t *e = q->elevator;
- if (q->last_merge == next)
- q->last_merge = NULL;
-
if (e->ops->elevator_merge_req_fn)
e->ops->elevator_merge_req_fn(q, rq, next);
+
+ q->last_merge = rq;
}
-/*
- * For careful internal use by the block layer. Essentially the same as
- * a requeue in that it tells the io scheduler that this request is not
- * active in the driver or hardware anymore, but we don't want the request
- * added back to the scheduler. Function is not exported.
- */
-void elv_deactivate_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -264,19 +302,14 @@ void elv_deactivate_request(request_queue_t *q, struct request *rq)
* it already went through dequeue, we need to decrement the
* in_flight count again
*/
- if (blk_account_rq(rq))
+ if (blk_account_rq(rq)) {
q->in_flight--;
+ if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
+ e->ops->elevator_deactivate_req_fn(q, rq);
+ }
rq->flags &= ~REQ_STARTED;
- if (e->ops->elevator_deactivate_req_fn)
- e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
-void elv_requeue_request(request_queue_t *q, struct request *rq)
-{
- elv_deactivate_request(q, rq);
-
/*
* if this is the flush, requeue the original instead and drop the flush
*/
@@ -285,31 +318,27 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
rq = rq->end_io_data;
}
- /*
- * the request is prepped and may have some resources allocated.
- * allowing unprepped requests to pass this one may cause resource
- * deadlock. turn on softbarrier.
- */
- rq->flags |= REQ_SOFTBARRIER;
-
- /*
- * if iosched has an explicit requeue hook, then use that. otherwise
- * just put the request at the front of the queue
- */
- if (q->elevator->ops->elevator_requeue_req_fn)
- q->elevator->ops->elevator_requeue_req_fn(q, rq);
- else
- __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+ __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
int plug)
{
- /*
- * barriers implicitly indicate back insertion
- */
- if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
- where == ELEVATOR_INSERT_SORT)
+ if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+ /*
+ * barriers implicitly indicate back insertion
+ */
+ if (where == ELEVATOR_INSERT_SORT)
+ where = ELEVATOR_INSERT_BACK;
+
+ /*
+ * this request is scheduling boundary, update end_sector
+ */
+ if (blk_fs_request(rq)) {
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = rq;
+ }
+ } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
where = ELEVATOR_INSERT_BACK;
if (plug)
@@ -317,23 +346,54 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
rq->q = q;
- if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
- q->elevator->ops->elevator_add_req_fn(q, rq, where);
+ switch (where) {
+ case ELEVATOR_INSERT_FRONT:
+ rq->flags |= REQ_SOFTBARRIER;
- if (blk_queue_plugged(q)) {
- int nrq = q->rq.count[READ] + q->rq.count[WRITE]
- - q->in_flight;
+ list_add(&rq->queuelist, &q->queue_head);
+ break;
- if (nrq >= q->unplug_thresh)
- __generic_unplug_device(q);
- }
- } else
+ case ELEVATOR_INSERT_BACK:
+ rq->flags |= REQ_SOFTBARRIER;
+
+ while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+ ;
+ list_add_tail(&rq->queuelist, &q->queue_head);
/*
- * if drain is set, store the request "locally". when the drain
- * is finished, the requests will be handed ordered to the io
- * scheduler
+ * We kick the queue here for the following reasons.
+ * - The elevator might have returned NULL previously
+ * to delay requests and returned them now. As the
+ * queue wasn't empty before this request, ll_rw_blk
+	 * won't run the queue on return, resulting in a hang.
+ * - Usually, back inserted requests won't be merged
+ * with anything. There's no point in delaying queue
+ * processing.
*/
- list_add_tail(&rq->queuelist, &q->drain_list);
+ blk_remove_plug(q);
+ q->request_fn(q);
+ break;
+
+ case ELEVATOR_INSERT_SORT:
+ BUG_ON(!blk_fs_request(rq));
+ rq->flags |= REQ_SORTED;
+ q->elevator->ops->elevator_add_req_fn(q, rq);
+ if (q->last_merge == NULL && rq_mergeable(rq))
+ q->last_merge = rq;
+ break;
+
+ default:
+ printk(KERN_ERR "%s: bad insertion point %d\n",
+ __FUNCTION__, where);
+ BUG();
+ }
+
+ if (blk_queue_plugged(q)) {
+ int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+ - q->in_flight;
+
+ if (nrq >= q->unplug_thresh)
+ __generic_unplug_device(q);
+ }
}
void elv_add_request(request_queue_t *q, struct request *rq, int where,
@@ -348,13 +408,19 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
static inline struct request *__elv_next_request(request_queue_t *q)
{
- struct request *rq = q->elevator->ops->elevator_next_req_fn(q);
+ struct request *rq;
+
+ if (unlikely(list_empty(&q->queue_head) &&
+ !q->elevator->ops->elevator_dispatch_fn(q, 0)))
+ return NULL;
+
+ rq = list_entry_rq(q->queue_head.next);
/*
* if this is a barrier write and the device has to issue a
* flush sequence to support it, check how far we are
*/
- if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) {
+ if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
if (q->ordered == QUEUE_ORDERED_FLUSH &&
@@ -371,15 +437,30 @@ struct request *elv_next_request(request_queue_t *q)
int ret;
while ((rq = __elv_next_request(q)) != NULL) {
- /*
- * just mark as started even if we don't start it, a request
- * that has been delayed should not be passed by new incoming
- * requests
- */
- rq->flags |= REQ_STARTED;
+ if (!(rq->flags & REQ_STARTED)) {
+ elevator_t *e = q->elevator;
- if (rq == q->last_merge)
- q->last_merge = NULL;
+ /*
+ * This is the first time the device driver
+ * sees this request (possibly after
+ * requeueing). Notify IO scheduler.
+ */
+ if (blk_sorted_rq(rq) &&
+ e->ops->elevator_activate_req_fn)
+ e->ops->elevator_activate_req_fn(q, rq);
+
+ /*
+ * just mark as started even if we don't start
+ * it, a request that has been delayed should
+ * not be passed by new incoming requests
+ */
+ rq->flags |= REQ_STARTED;
+ }
+
+ if (!q->boundary_rq || q->boundary_rq == rq) {
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = NULL;
+ }
if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
break;
@@ -391,9 +472,9 @@ struct request *elv_next_request(request_queue_t *q)
/*
* the request may have been (partially) prepped.
* we need to keep this request in the front to
- * avoid resource deadlock. turn on softbarrier.
+ * avoid resource deadlock. REQ_STARTED will
+ * prevent other fs requests from passing this one.
*/
- rq->flags |= REQ_SOFTBARRIER;
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
@@ -416,42 +497,32 @@ struct request *elv_next_request(request_queue_t *q)
return rq;
}
-void elv_remove_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
- elevator_t *e = q->elevator;
+ BUG_ON(list_empty(&rq->queuelist));
+
+ list_del_init(&rq->queuelist);
/*
* the time frame between a request being removed from the lists
* and to it is freed is accounted as io that is in progress at
- * the driver side. note that we only account requests that the
- * driver has seen (REQ_STARTED set), to avoid false accounting
- * for request-request merges
+ * the driver side.
*/
if (blk_account_rq(rq))
q->in_flight++;
-
- /*
- * the main clearing point for q->last_merge is on retrieval of
- * request by driver (it calls elv_next_request()), but it _can_
- * also happen here if a request is added to the queue but later
- * deleted without ever being given to driver (merged with another
- * request).
- */
- if (rq == q->last_merge)
- q->last_merge = NULL;
-
- if (e->ops->elevator_remove_req_fn)
- e->ops->elevator_remove_req_fn(q, rq);
}
int elv_queue_empty(request_queue_t *q)
{
elevator_t *e = q->elevator;
+ if (!list_empty(&q->queue_head))
+ return 0;
+
if (e->ops->elevator_queue_empty_fn)
return e->ops->elevator_queue_empty_fn(q);
- return list_empty(&q->queue_head);
+ return 1;
}
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
@@ -487,7 +558,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
}
int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
- int gfp_mask)
+ gfp_t gfp_mask)
{
elevator_t *e = q->elevator;
@@ -523,11 +594,11 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
/*
* request is released from the driver, io must be done
*/
- if (blk_account_rq(rq))
+ if (blk_account_rq(rq)) {
q->in_flight--;
-
- if (e->ops->elevator_completed_req_fn)
- e->ops->elevator_completed_req_fn(q, rq);
+ if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+ e->ops->elevator_completed_req_fn(q, rq);
+ }
}
int elv_register_queue(struct request_queue *q)
@@ -555,10 +626,9 @@ void elv_unregister_queue(struct request_queue *q)
int elv_register(struct elevator_type *e)
{
+ spin_lock_irq(&elv_list_lock);
if (elevator_find(e->elevator_name))
BUG();
-
- spin_lock_irq(&elv_list_lock);
list_add_tail(&e->list, &elv_list);
spin_unlock_irq(&elv_list_lock);
@@ -582,25 +652,36 @@ EXPORT_SYMBOL_GPL(elv_unregister);
* switch to new_e io scheduler. be careful not to introduce deadlocks -
* we don't free the old io scheduler, before we have allocated what we
* need for the new one. this way we have a chance of going back to the old
- * one, if the new one fails init for some reason. we also do an intermediate
- * switch to noop to ensure safety with stack-allocated requests, since they
- * don't originate from the block layer allocator. noop is safe here, because
- * it never needs to touch the elevator itself for completion events. DRAIN
- * flags will make sure we don't touch it for additions either.
+ * one, if the new one fails init for some reason.
*/
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
- elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
- struct elevator_type *noop_elevator = NULL;
- elevator_t *old_elevator;
+ elevator_t *old_elevator, *e;
+ /*
+ * Allocate new elevator
+ */
+ e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
if (!e)
goto error;
/*
- * first step, drain requests from the block freelist
+ * Turn on BYPASS and drain all requests w/ elevator private data
*/
- blk_wait_queue_drained(q, 0);
+ spin_lock_irq(q->queue_lock);
+
+ set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+ while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+ ;
+
+ while (q->rq.elvpriv) {
+ spin_unlock_irq(q->queue_lock);
+ msleep(10);
+ spin_lock_irq(q->queue_lock);
+ }
+
+ spin_unlock_irq(q->queue_lock);
/*
* unregister old elevator data
@@ -609,18 +690,6 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
old_elevator = q->elevator;
/*
- * next step, switch to noop since it uses no private rq structures
- * and doesn't allocate any memory for anything. then wait for any
- * non-fs requests in-flight
- */
- noop_elevator = elevator_get("noop");
- spin_lock_irq(q->queue_lock);
- elevator_attach(q, noop_elevator, e);
- spin_unlock_irq(q->queue_lock);
-
- blk_wait_queue_drained(q, 1);
-
- /*
* attach and start new elevator
*/
if (elevator_attach(q, new_e, e))
@@ -630,11 +699,10 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
goto fail_register;
/*
- * finally exit old elevator and start queue again
+ * finally exit old elevator and turn off BYPASS.
*/
elevator_exit(old_elevator);
- blk_finish_queue_drain(q);
- elevator_put(noop_elevator);
+ clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
return;
fail_register:
@@ -643,13 +711,13 @@ fail_register:
* one again (along with re-adding the sysfs dir)
*/
elevator_exit(e);
+ e = NULL;
fail:
q->elevator = old_elevator;
elv_register_queue(q);
- blk_finish_queue_drain(q);
+ clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+ kfree(e);
error:
- if (noop_elevator)
- elevator_put(noop_elevator);
elevator_put(new_e);
printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
}
@@ -701,11 +769,12 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
return len;
}
+EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_remove_request);
+EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
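
elv_dispatch_sort(), added in the hunk above, implements a one-way elevator around q->end_sector: among requests that are neither barriers nor already started (which it never passes), sectors at or past the boundary are kept first in ascending order, and sectors below the boundary wrap around to the tail, also ascending. A small stand-alone toy (example sector values assumed, not kernel code) that reproduces the resulting order:

#include <stdio.h>
#include <stdlib.h>

static unsigned long boundary = 5000;   /* stands in for q->end_sector */

static int cmp(const void *a, const void *b)
{
        unsigned long x = *(const unsigned long *)a;
        unsigned long y = *(const unsigned long *)b;
        int xwrap = x < boundary;       /* below the boundary => dispatched last */
        int ywrap = y < boundary;

        if (xwrap != ywrap)
                return xwrap - ywrap;
        return (x > y) - (x < y);       /* ascending within each group */
}

int main(void)
{
        unsigned long sec[] = { 4096, 9000, 5200, 128, 7777 };
        int i, n = sizeof(sec) / sizeof(sec[0]);

        qsort(sec, n, sizeof(sec[0]), cmp);
        for (i = 0; i < n; i++)
                printf("%lu\n", sec[i]);
        return 0;
}

For a boundary of 5000 this prints 5200 7777 9000 128 4096: the head keeps sweeping forward from the last dispatched position before wrapping back to the lowest sectors.
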
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index baedac52294..0af73512b9a 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -263,8 +263,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
blk_queue_activity_fn(q, NULL, NULL);
-
- INIT_LIST_HEAD(&q->drain_list);
}
EXPORT_SYMBOL(blk_queue_make_request);
@@ -353,6 +351,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
struct request *rq = flush_rq->end_io_data;
request_queue_t *q = rq->q;
+ elv_completed_request(q, flush_rq);
+
rq->flags |= REQ_BAR_PREFLUSH;
if (!flush_rq->errors)
@@ -369,6 +369,8 @@ static void blk_post_flush_end_io(struct request *flush_rq)
struct request *rq = flush_rq->end_io_data;
request_queue_t *q = rq->q;
+ elv_completed_request(q, flush_rq);
+
rq->flags |= REQ_BAR_POSTFLUSH;
q->end_flush_fn(q, flush_rq);
@@ -408,8 +410,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
if (!list_empty(&rq->queuelist))
blkdev_dequeue_request(rq);
- elv_deactivate_request(q, rq);
-
flush_rq->end_io_data = rq;
flush_rq->end_io = blk_pre_flush_end_io;
@@ -1040,6 +1040,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
static char *rq_flags[] = {
"REQ_RW",
"REQ_FAILFAST",
+ "REQ_SORTED",
"REQ_SOFTBARRIER",
"REQ_HARDBARRIER",
"REQ_CMD",
@@ -1047,6 +1048,7 @@ static char *rq_flags[] = {
"REQ_STARTED",
"REQ_DONTPREP",
"REQ_QUEUED",
+ "REQ_ELVPRIV",
"REQ_PC",
"REQ_BLOCK_PC",
"REQ_SENSE",
@@ -1637,9 +1639,9 @@ static int blk_init_free_list(request_queue_t *q)
rl->count[READ] = rl->count[WRITE] = 0;
rl->starved[READ] = rl->starved[WRITE] = 0;
+ rl->elvpriv = 0;
init_waitqueue_head(&rl->wait[READ]);
init_waitqueue_head(&rl->wait[WRITE]);
- init_waitqueue_head(&rl->drain);
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
mempool_free_slab, request_cachep, q->node);
@@ -1652,13 +1654,13 @@ static int blk_init_free_list(request_queue_t *q)
static int __make_request(request_queue_t *, struct bio *);
-request_queue_t *blk_alloc_queue(int gfp_mask)
+request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);
-request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
+request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
request_queue_t *q;
@@ -1782,12 +1784,14 @@ EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(request_queue_t *q, struct request *rq)
{
- elv_put_request(q, rq);
+ if (rq->flags & REQ_ELVPRIV)
+ elv_put_request(q, rq);
mempool_free(rq, q->rq.rq_pool);
}
static inline struct request *
-blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
+ int priv, gfp_t gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -1800,11 +1804,15 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
*/
rq->flags = rw;
- if (!elv_set_request(q, rq, bio, gfp_mask))
- return rq;
+ if (priv) {
+ if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+ mempool_free(rq, q->rq.rq_pool);
+ return NULL;
+ }
+ rq->flags |= REQ_ELVPRIV;
+ }
- mempool_free(rq, q->rq.rq_pool);
- return NULL;
+ return rq;
}
/*
@@ -1860,22 +1868,18 @@ static void __freed_request(request_queue_t *q, int rw)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(request_queue_t *q, int rw)
+static void freed_request(request_queue_t *q, int rw, int priv)
{
struct request_list *rl = &q->rq;
rl->count[rw]--;
+ if (priv)
+ rl->elvpriv--;
__freed_request(q, rw);
if (unlikely(rl->starved[rw ^ 1]))
__freed_request(q, rw ^ 1);
-
- if (!rl->count[READ] && !rl->count[WRITE]) {
- smp_mb();
- if (unlikely(waitqueue_active(&rl->drain)))
- wake_up(&rl->drain);
- }
}
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
@@ -1885,14 +1889,12 @@ static void freed_request(request_queue_t *q, int rw)
* Returns !NULL on success, with queue_lock *not held*.
*/
static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
- int gfp_mask)
+ gfp_t gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = current_io_context(GFP_ATOMIC);
-
- if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
- goto out;
+ int priv;
if (rl->count[rw]+1 >= q->nr_requests) {
/*
@@ -1937,9 +1939,14 @@ get_rq:
rl->starved[rw] = 0;
if (rl->count[rw] >= queue_congestion_on_threshold(q))
set_queue_congested(q, rw);
+
+ priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+ if (priv)
+ rl->elvpriv++;
+
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, bio, gfp_mask);
+ rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
@@ -1949,7 +1956,7 @@ get_rq:
* wait queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
- freed_request(q, rw);
+ freed_request(q, rw, priv);
/*
* in the very unlikely event that allocation failed and no
@@ -2019,7 +2026,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
return rq;
}
-struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
+struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
{
struct request *rq;
@@ -2251,7 +2258,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
* @gfp_mask: memory allocation flags
*/
int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
- unsigned int len, unsigned int gfp_mask)
+ unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
@@ -2433,13 +2440,15 @@ void disk_round_stats(struct gendisk *disk)
{
unsigned long now = jiffies;
- __disk_stat_add(disk, time_in_queue,
- disk->in_flight * (now - disk->stamp));
- disk->stamp = now;
+ if (now == disk->stamp)
+ return;
- if (disk->in_flight)
- __disk_stat_add(disk, io_ticks, (now - disk->stamp_idle));
- disk->stamp_idle = now;
+ if (disk->in_flight) {
+ __disk_stat_add(disk, time_in_queue,
+ disk->in_flight * (now - disk->stamp));
+ __disk_stat_add(disk, io_ticks, (now - disk->stamp));
+ }
+ disk->stamp = now;
}
/*
@@ -2454,6 +2463,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
if (unlikely(--req->ref_count))
return;
+ elv_completed_request(q, req);
+
req->rq_status = RQ_INACTIVE;
req->rl = NULL;
@@ -2463,26 +2474,25 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
*/
if (rl) {
int rw = rq_data_dir(req);
-
- elv_completed_request(q, req);
+ int priv = req->flags & REQ_ELVPRIV;
BUG_ON(!list_empty(&req->queuelist));
blk_free_request(q, req);
- freed_request(q, rw);
+ freed_request(q, rw, priv);
}
}
void blk_put_request(struct request *req)
{
+ unsigned long flags;
+ request_queue_t *q = req->q;
+
/*
- * if req->rl isn't set, this request didnt originate from the
- * block layer, so it's safe to just disregard it
+ * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
+ * following if (q) test.
*/
- if (req->rl) {
- unsigned long flags;
- request_queue_t *q = req->q;
-
+ if (q) {
spin_lock_irqsave(q->queue_lock, flags);
__blk_put_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2797,97 +2807,6 @@ static inline void blk_partition_remap(struct bio *bio)
}
}
-void blk_finish_queue_drain(request_queue_t *q)
-{
- struct request_list *rl = &q->rq;
- struct request *rq;
- int requeued = 0;
-
- spin_lock_irq(q->queue_lock);
- clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
-
- while (!list_empty(&q->drain_list)) {
- rq = list_entry_rq(q->drain_list.next);
-
- list_del_init(&rq->queuelist);
- elv_requeue_request(q, rq);
- requeued++;
- }
-
- if (requeued)
- q->request_fn(q);
-
- spin_unlock_irq(q->queue_lock);
-
- wake_up(&rl->wait[0]);
- wake_up(&rl->wait[1]);
- wake_up(&rl->drain);
-}
-
-static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch)
-{
- int wait = rl->count[READ] + rl->count[WRITE];
-
- if (dispatch)
- wait += !list_empty(&q->queue_head);
-
- return wait;
-}
-
-/*
- * We rely on the fact that only requests allocated through blk_alloc_request()
- * have io scheduler private data structures associated with them. Any other
- * type of request (allocated on stack or through kmalloc()) should not go
- * to the io scheduler core, but be attached to the queue head instead.
- */
-void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch)
-{
- struct request_list *rl = &q->rq;
- DEFINE_WAIT(wait);
-
- spin_lock_irq(q->queue_lock);
- set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
-
- while (wait_drain(q, rl, wait_dispatch)) {
- prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
-
- if (wait_drain(q, rl, wait_dispatch)) {
- __generic_unplug_device(q);
- spin_unlock_irq(q->queue_lock);
- io_schedule();
- spin_lock_irq(q->queue_lock);
- }
-
- finish_wait(&rl->drain, &wait);
- }
-
- spin_unlock_irq(q->queue_lock);
-}
-
-/*
- * block waiting for the io scheduler being started again.
- */
-static inline void block_wait_queue_running(request_queue_t *q)
-{
- DEFINE_WAIT(wait);
-
- while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
- struct request_list *rl = &q->rq;
-
- prepare_to_wait_exclusive(&rl->drain, &wait,
- TASK_UNINTERRUPTIBLE);
-
- /*
- * re-check the condition. avoids using prepare_to_wait()
- * in the fast path (queue is running)
- */
- if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
- io_schedule();
-
- finish_wait(&rl->drain, &wait);
- }
-}
-
static void handle_bad_sector(struct bio *bio)
{
char b[BDEVNAME_SIZE];
@@ -2983,8 +2902,6 @@ end_io:
if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
goto end_io;
- block_wait_queue_running(q);
-
/*
* If this device has partitions, remap block n
* of partition p to block n+start(p) of the disk.
@@ -3393,7 +3310,7 @@ void exit_io_context(void)
* but since the current task itself holds a reference, the context can be
* used in general code, so long as it stays within `current` context.
*/
-struct io_context *current_io_context(int gfp_flags)
+struct io_context *current_io_context(gfp_t gfp_flags)
{
struct task_struct *tsk = current;
struct io_context *ret;
@@ -3424,7 +3341,7 @@ EXPORT_SYMBOL(current_io_context);
*
* This is always called in the context of the task which submitted the I/O.
*/
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags)
{
struct io_context *ret;
ret = current_io_context(gfp_flags);
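
The ll_rw_blk.c changes above pair the new REQ_ELVPRIV flag with the rl->elvpriv counter: only requests allocated while no elevator switch is in progress get io scheduler private data, and elevator_switch() (in the elevator.c hunks earlier) sets QUEUE_FLAG_ELVSWITCH and waits for rl->elvpriv to reach zero before tearing the old scheduler down. A simplified sketch of the allocate/free pairing (not the actual code; locking and congestion accounting elided, bio passed as NULL for brevity):

static struct request *get_request_sketch(request_queue_t *q, int rw, gfp_t gfp_mask)
{
        struct request_list *rl = &q->rq;
        int priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        struct request *rq;

        if (priv)
                rl->elvpriv++;          /* the io scheduler will see this request */

        rq = blk_alloc_request(q, rw, NULL, priv, gfp_mask);
        if (!rq && priv)
                rl->elvpriv--;          /* allocation failed, undo */

        return rq;
}

static void put_request_sketch(request_queue_t *q, struct request *rq)
{
        int rw = rq_data_dir(rq);
        int priv = rq->flags & REQ_ELVPRIV;

        blk_free_request(q, rq);        /* calls elv_put_request() only if priv */
        freed_request(q, rw, priv);     /* drops rl->count[rw] and, if priv, rl->elvpriv */
}
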
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b35e08876dd..96c664af8d0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -881,7 +881,7 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
{
struct file *filp = lo->lo_backing_file;
- int gfp = lo->old_gfp_mask;
+ gfp_t gfp = lo->old_gfp_mask;
if (lo->lo_state != Lo_bound)
return -ENXIO;
diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c
index b1730b62c37..f56b8edb06e 100644
--- a/drivers/block/noop-iosched.c
+++ b/drivers/block/noop-iosched.c
@@ -7,57 +7,19 @@
#include <linux/module.h>
#include <linux/init.h>
-/*
- * See if we can find a request that this buffer can be coalesced with.
- */
-static int elevator_noop_merge(request_queue_t *q, struct request **req,
- struct bio *bio)
-{
- int ret;
-
- ret = elv_try_last_merge(q, bio);
- if (ret != ELEVATOR_NO_MERGE)
- *req = q->last_merge;
-
- return ret;
-}
-
-static void elevator_noop_merge_requests(request_queue_t *q, struct request *req,
- struct request *next)
-{
- list_del_init(&next->queuelist);
-}
-
-static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
- int where)
+static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
{
- if (where == ELEVATOR_INSERT_FRONT)
- list_add(&rq->queuelist, &q->queue_head);
- else
- list_add_tail(&rq->queuelist, &q->queue_head);
-
- /*
- * new merges must not precede this barrier
- */
- if (rq->flags & REQ_HARDBARRIER)
- q->last_merge = NULL;
- else if (!q->last_merge)
- q->last_merge = rq;
+ elv_dispatch_add_tail(q, rq);
}
-static struct request *elevator_noop_next_request(request_queue_t *q)
+static int elevator_noop_dispatch(request_queue_t *q, int force)
{
- if (!list_empty(&q->queue_head))
- return list_entry_rq(q->queue_head.next);
-
- return NULL;
+ return 0;
}
static struct elevator_type elevator_noop = {
.ops = {
- .elevator_merge_fn = elevator_noop_merge,
- .elevator_merge_req_fn = elevator_noop_merge_requests,
- .elevator_next_req_fn = elevator_noop_next_request,
+ .elevator_dispatch_fn = elevator_noop_dispatch,
.elevator_add_req_fn = elevator_noop_add_request,
},
.elevator_name = "noop",
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 145c1fbffe0..68c60a5bcda 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -348,7 +348,7 @@ static int rd_open(struct inode *inode, struct file *filp)
struct block_device *bdev = inode->i_bdev;
struct address_space *mapping;
unsigned bsize;
- int gfp_mask;
+ gfp_t gfp_mask;
inode = igrab(bdev->bd_inode);
rd_bdev[unit] = bdev;
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index ced4215e227..39ea96e42c5 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -148,7 +148,8 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
offset = address - vma->vm_start;
i = (unsigned long)map->handle + offset;
- page = vmalloc_to_page((void *)i);
+ page = (map->type == _DRM_CONSISTENT) ?
+ virt_to_page((void *)i) : vmalloc_to_page((void *)i);
if (!page)
return NOPAGE_OOM;
get_page(page);
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index b22fdbd4f83..6059c5a5b10 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -227,7 +227,7 @@ static inline u32 _MGA_READ(u32 *addr)
#define MGA_EMIT_STATE( dev_priv, dirty ) \
do { \
if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \
- if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { \
+ if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \
mga_g400_emit_state( dev_priv ); \
} else { \
mga_g200_emit_state( dev_priv ); \
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 05bbb471937..6ac5e006226 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -53,7 +53,7 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
*/
- if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
MGA_LEN + MGA_EXEC, 0x80000000,
MGA_DWGCTL, ctx->dwgctl,
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 6d9080a3ca7..12ef13ff04c 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -1133,10 +1133,10 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
ring_start = (dev_priv->cp_ring->offset
- dev->agp->base
+ dev_priv->gart_vm_start);
- } else
+ } else
#endif
ring_start = (dev_priv->cp_ring->offset
- - dev->sg->handle
+ - (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
RADEON_WRITE( RADEON_CP_RB_BASE, ring_start );
@@ -1164,7 +1164,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
drm_sg_mem_t *entry = dev->sg;
unsigned long tmp_ofs, page_ofs;
- tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle;
+ tmp_ofs = dev_priv->ring_rptr->offset -
+ (unsigned long)dev->sg->virtual;
page_ofs = tmp_ofs >> PAGE_SHIFT;
RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
@@ -1491,8 +1492,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
else
#endif
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- - dev->sg->handle
- + dev_priv->gart_vm_start);
+ - (unsigned long)dev->sg->virtual
+ + dev_priv->gart_vm_start);
DRM_DEBUG( "dev_priv->gart_size %d\n",
dev_priv->gart_size );
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index c9bdf544ed2..c556f4d3ccd 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -62,7 +62,7 @@
static inline unsigned char *alloc_buf(void)
{
- unsigned int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
if (PAGE_SIZE != N_TTY_BUF_SIZE)
return kmalloc(N_TTY_BUF_SIZE, prio);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e1df376e709..2ed5c4363b5 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -315,9 +315,9 @@ static void dbs_check_cpu(int cpu)
policy = this_dbs_info->cur_policy;
if ( init_flag == 0 ) {
- for ( /* NULL */; init_flag < NR_CPUS; init_flag++ ) {
- dbs_info = &per_cpu(cpu_dbs_info, init_flag);
- requested_freq[cpu] = dbs_info->cur_policy->cur;
+ for_each_online_cpu(j) {
+ dbs_info = &per_cpu(cpu_dbs_info, j);
+ requested_freq[j] = dbs_info->cur_policy->cur;
}
init_flag = 1;
}
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 4802bbbb6dc..c9e92d85c89 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1630,7 +1630,7 @@ static void ether1394_complete_cb(void *__ptask)
/* Transmit a packet (called by kernel) */
static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
{
- int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ gfp_t kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
struct eth1394hdr *eth;
struct eth1394_priv *priv = netdev_priv(dev);
int proto;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f6a8ac02655..378646b5a1b 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -524,7 +524,7 @@ void mthca_cmd_use_polling(struct mthca_dev *dev)
}
struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
- unsigned int gfp_mask)
+ gfp_t gfp_mask)
{
struct mthca_mailbox *mailbox;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 65f976a13e0..18175bec84c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -248,7 +248,7 @@ void mthca_cmd_event(struct mthca_dev *dev, u16 token,
u8 status, u64 out_param);
struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
- unsigned int gfp_mask);
+ gfp_t gfp_mask);
void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index c81fa8e975e..8dfafda5ed2 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -396,20 +396,21 @@ static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs
writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
- if (ecr) {
- writel(ecr, dev->eq_regs.tavor.ecr_base +
- MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+ if (!ecr)
+ return IRQ_NONE;
- for (i = 0; i < MTHCA_NUM_EQ; ++i)
- if (ecr & dev->eq_table.eq[i].eqn_mask &&
- mthca_eq_int(dev, &dev->eq_table.eq[i])) {
+ writel(ecr, dev->eq_regs.tavor.ecr_base +
+ MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+
+ for (i = 0; i < MTHCA_NUM_EQ; ++i)
+ if (ecr & dev->eq_table.eq[i].eqn_mask) {
+ if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
dev->eq_table.eq[i].cons_index);
- tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
- }
- }
+ tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
+ }
- return IRQ_RETVAL(ecr);
+ return IRQ_HANDLED;
}
static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 7bd7a4bec7b..9ad8b3b6cfe 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -82,7 +82,7 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
}
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
- unsigned int gfp_mask)
+ gfp_t gfp_mask)
{
struct mthca_icm *icm;
struct mthca_icm_chunk *chunk = NULL;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index bafa51544aa..29433f29525 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -77,7 +77,7 @@ struct mthca_icm_iter {
struct mthca_dev;
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
- unsigned int gfp_mask);
+ gfp_t gfp_mask);
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm);
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2fba2bbe72d..01654fcabc5 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -91,7 +91,7 @@ int bitmap_active(struct bitmap *bitmap)
#define WRITE_POOL_SIZE 256
/* mempool for queueing pending writes on the bitmap file */
-static void *write_pool_alloc(unsigned int gfp_flags, void *data)
+static void *write_pool_alloc(gfp_t gfp_flags, void *data)
{
return kmalloc(sizeof(struct page_list), gfp_flags);
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index b6148f6f783..28c1a628621 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -331,7 +331,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
{
struct bio *bio;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- int gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
+ gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
unsigned int i;
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e9476075aa1..2a8a5696bf8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3568,7 +3568,8 @@ static void md_do_sync(mddev_t *mddev)
mddev->curr_resync = 2;
try_again:
- if (signal_pending(current)) {
+ if (signal_pending(current) ||
+ kthread_should_stop()) {
flush_signals(current);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto skip;
@@ -3590,8 +3591,9 @@ static void md_do_sync(mddev_t *mddev)
*/
continue;
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
- if (!signal_pending(current)
- && mddev2->curr_resync >= mddev->curr_resync) {
+ if (!signal_pending(current) &&
+ !kthread_should_stop() &&
+ mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying resync of %s"
" until %s has finished resync (they"
" share one or more physical units)\n",
@@ -3697,7 +3699,7 @@ static void md_do_sync(mddev_t *mddev)
}
- if (signal_pending(current)) {
+ if (signal_pending(current) || kthread_should_stop()) {
/*
* got a signal, exit.
*/
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 93570355819..bbb989df4cf 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -262,7 +262,6 @@ config VIDEO_SAA7134_DVB
depends on VIDEO_SAA7134 && DVB_CORE
select VIDEO_BUF_DVB
select DVB_MT352
- select DVB_CX22702
select DVB_TDA1004X
---help---
This adds support for DVB cards based on the
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 429820e48c6..7de19a84dc7 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -257,8 +257,8 @@ static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
printk("Target ID=0x%X\n", pg0->TargetID);
printk("Bus=0x%X\n", pg0->Bus);
- printk("PhyNum=0x%X\n", pg0->PhyNum);
- printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
+ printk("Parent Phy Num=0x%X\n", pg0->PhyNum);
+ printk("Access Status=0x%X\n", le16_to_cpu(pg0->AccessStatus));
printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
printk("Physical Port=0x%X\n", pg0->PhysicalPort);
@@ -270,7 +270,7 @@ static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
printk("---- SAS EXPANDER PAGE 1 ------------\n");
printk("Physical Port=0x%X\n", pg1->PhysicalPort);
- printk("PHY Identifier=0x%X\n", pg1->Phy);
+ printk("PHY Identifier=0x%X\n", pg1->PhyIdentifier);
printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
@@ -604,7 +604,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
mptsas_print_expander_pg1(buffer);
/* save config data */
- phy_info->phy_id = buffer->Phy;
+ phy_info->phy_id = buffer->PhyIdentifier;
phy_info->port_id = buffer->PhysicalPort;
phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
@@ -825,6 +825,8 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
(MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
+ port_info->phy_info[i].identify.phy_id =
+ port_info->phy_info[i].phy_id;
handle = port_info->phy_info[i].identify.handle;
if (port_info->phy_info[i].attached.handle) {
@@ -881,6 +883,8 @@ mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].identify.handle);
+ port_info->phy_info[i].identify.phy_id =
+ port_info->phy_info[i].phy_id;
}
if (port_info->phy_info[i].attached.handle) {
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index bc537440ca0..f822cd3025f 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1027,8 +1027,7 @@ static void cp_reset_hw (struct cp_private *cp)
if (!(cpr8(Cmd) & CmdReset))
return;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(10);
+ schedule_timeout_uninterruptible(10);
}
printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
@@ -1575,6 +1574,7 @@ static struct ethtool_ops cp_ethtool_ops = {
.set_wol = cp_set_wol,
.get_strings = cp_get_strings,
.get_ethtool_stats = cp_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1773,6 +1773,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < 3; i++)
((u16 *) (dev->dev_addr))[i] =
le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->open = cp_open;
dev->stop = cp_close;
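Several drivers in this merge (8139cp above; 8139too, b44, e100 and e1000 below) gain the same two-part change: copy the factory MAC into dev->perm_addr once it has been read from the EEPROM, and wire the generic ethtool_op_get_perm_addr helper into their ethtool_ops so user space can query it. In outline, with hypothetical names standing in for any one driver:

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>

/* Called from probe, after dev->dev_addr has been filled from the EEPROM:
 * remember the factory address so it can still be reported after the
 * runtime address is changed. */
static void demo_record_perm_addr(struct net_device *dev)
{
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}

static struct ethtool_ops demo_ethtool_ops = {
	/* ... driver-specific ops ... */
	.get_perm_addr	= ethtool_op_get_perm_addr,
};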
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 4c2cf7bbd25..30bee11c48b 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -552,7 +552,8 @@ const static struct {
{ "RTL-8100B/8139D",
HW_REVID(1, 1, 1, 0, 1, 0, 1),
- HasLWake,
+ HasHltClk /* XXX undocumented? */
+ | HasLWake,
},
{ "RTL-8101",
@@ -970,6 +971,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
for (i = 0; i < 3; i++)
((u16 *) (dev->dev_addr))[i] =
le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* The Rtl8139-specific entries in the device structure. */
dev->open = rtl8139_open;
@@ -2465,6 +2467,7 @@ static struct ethtool_ops rtl8139_ethtool_ops = {
.get_strings = rtl8139_get_strings,
.get_stats_count = rtl8139_get_stats_count,
.get_ethtool_stats = rtl8139_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c748b0e1641..5148d47492a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -475,6 +475,14 @@ config SGI_IOC3_ETH_HW_TX_CSUM
the moment only acceleration of IPv4 is supported. This option
enables offloading for checksums on transmit. If unsure, say Y.
+config MIPS_SIM_NET
+ tristate "MIPS simulator Network device (EXPERIMENTAL)"
+ depends on NETDEVICES && MIPS_SIM && EXPERIMENTAL
+ help
+ The MIPSNET device is a simple Ethernet network device which is
+ emulated by the MIPS Simulator.
+ If you are not using a MIPSsim or are unsure, say N.
+
config SGI_O2MACE_ETH
tristate "SGI O2 MACE Fast Ethernet support"
depends on NET_ETHERNET && SGI_IP32=y
@@ -2083,6 +2091,7 @@ config SPIDER_NET
config GIANFAR
tristate "Gianfar Ethernet"
depends on 85xx || 83xx
+ select PHYLIB
help
This driver supports the Gigabit TSEC on the MPC85xx
family of chips, and the FEC on the 8540
@@ -2243,6 +2252,20 @@ config ISERIES_VETH
tristate "iSeries Virtual Ethernet driver support"
depends on PPC_ISERIES
+config RIONET
+ tristate "RapidIO Ethernet over messaging driver support"
+ depends on NETDEVICES && RAPIDIO
+
+config RIONET_TX_SIZE
+ int "Number of outbound queue entries"
+ depends on RIONET
+ default "128"
+
+config RIONET_RX_SIZE
+ int "Number of inbound queue entries"
+ depends on RIONET
+ default "128"
+
config FDDI
bool "FDDI driver support"
depends on (PCI || EISA)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8aeec9f2495..1a84e0435f6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
-gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_phy.o
+gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_mii.o
#
# link order important here
@@ -64,6 +64,7 @@ obj-$(CONFIG_SKFP) += skfp/
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
+obj-$(CONFIG_RIONET) += rionet.o
#
# end link order section
@@ -166,6 +167,7 @@ obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
obj-$(CONFIG_MIPS_GT96100ETH) += gt96100eth.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
+obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
obj-$(CONFIG_DECLANCE) += declance.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index c82b9cd1c92..78506911d65 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -151,13 +151,6 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
SUPPORTED_Autoneg
-static char *phy_link[] =
-{ "unknown",
- "10Base2", "10BaseT",
- "AUI",
- "100BaseT", "100BaseTX", "100BaseFX"
-};
-
int bcm_5201_init(struct net_device *dev, int phy_addr)
{
s16 data;
@@ -785,6 +778,7 @@ static struct mii_chip_info {
{"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
{"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
{"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
+ {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops ,0},
{"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
{"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
{"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
@@ -1045,7 +1039,7 @@ found:
#endif
if (aup->mii->chip_info == NULL) {
- printk(KERN_ERR "%s: Au1x No MII transceivers found!\n",
+ printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
dev->name);
return -1;
}
@@ -1546,6 +1540,9 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
printk(KERN_ERR "%s: out of memory\n", dev->name);
goto err_out;
}
+ aup->mii->next = NULL;
+ aup->mii->chip_info = NULL;
+ aup->mii->status = 0;
aup->mii->mii_control_reg = 0;
aup->mii->mii_data_reg = 0;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 94939f570f7..282ebd15f01 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -106,6 +106,29 @@ static int b44_poll(struct net_device *dev, int *budget);
static void b44_poll_controller(struct net_device *dev);
#endif
+static int dma_desc_align_mask;
+static int dma_desc_sync_size;
+
+static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
+ dma_addr_t dma_base,
+ unsigned long offset,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_device(&pdev->dev, dma_base,
+ offset & dma_desc_align_mask,
+ dma_desc_sync_size, dir);
+}
+
+static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
+ dma_addr_t dma_base,
+ unsigned long offset,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
+ offset & dma_desc_align_mask,
+ dma_desc_sync_size, dir);
+}
+
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
return readl(bp->regs + reg);
@@ -668,6 +691,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dp->ctrl = cpu_to_le32(ctrl);
dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
+ dest_idx * sizeof(dp),
+ DMA_BIDIRECTIONAL);
+
return RX_PKT_BUF_SZ;
}
@@ -692,6 +720,11 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
pci_unmap_addr_set(dest_map, mapping,
pci_unmap_addr(src_map, mapping));
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
+ src_idx * sizeof(src_desc),
+ DMA_BIDIRECTIONAL);
+
ctrl = src_desc->ctrl;
if (dest_idx == (B44_RX_RING_SIZE - 1))
ctrl |= cpu_to_le32(DESC_CTRL_EOT);
@@ -700,8 +733,14 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dest_desc->ctrl = ctrl;
dest_desc->addr = src_desc->addr;
+
src_map->skb = NULL;
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
+ dest_idx * sizeof(dest_desc),
+ DMA_BIDIRECTIONAL);
+
pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
RX_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
@@ -959,6 +998,11 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
+ entry * sizeof(bp->tx_ring[0]),
+ DMA_TO_DEVICE);
+
entry = NEXT_TX(entry);
bp->tx_prod = entry;
@@ -1064,6 +1108,16 @@ static void b44_init_rings(struct b44 *bp)
memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+ dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES,
+ PCI_DMA_TODEVICE);
+
for (i = 0; i < bp->rx_pending; i++) {
if (b44_alloc_rx_skb(bp, -1, i) < 0)
break;
@@ -1085,14 +1139,28 @@ static void b44_free_consistent(struct b44 *bp)
bp->tx_buffers = NULL;
}
if (bp->rx_ring) {
- pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
- bp->rx_ring, bp->rx_ring_dma);
+ if (bp->flags & B44_FLAG_RX_RING_HACK) {
+ dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+ kfree(bp->rx_ring);
+ } else
+ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
bp->rx_ring = NULL;
+ bp->flags &= ~B44_FLAG_RX_RING_HACK;
}
if (bp->tx_ring) {
- pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
- bp->tx_ring, bp->tx_ring_dma);
+ if (bp->flags & B44_FLAG_TX_RING_HACK) {
+ dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+ kfree(bp->tx_ring);
+ } else
+ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
bp->tx_ring = NULL;
+ bp->flags &= ~B44_FLAG_TX_RING_HACK;
}
}
@@ -1118,12 +1186,56 @@ static int b44_alloc_consistent(struct b44 *bp)
size = DMA_TABLE_BYTES;
bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
- if (!bp->rx_ring)
- goto out_err;
+ if (!bp->rx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+ than necessary... */
+ struct dma_desc *rx_ring;
+ dma_addr_t rx_ring_dma;
+
+ if (!(rx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
+ goto out_err;
+
+ memset(rx_ring, 0, size);
+ rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+
+ if (rx_ring_dma + size > B44_DMA_MASK) {
+ kfree(rx_ring);
+ goto out_err;
+ }
+
+ bp->rx_ring = rx_ring;
+ bp->rx_ring_dma = rx_ring_dma;
+ bp->flags |= B44_FLAG_RX_RING_HACK;
+ }
bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
- if (!bp->tx_ring)
- goto out_err;
+ if (!bp->tx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+ than necessary... */
+ struct dma_desc *tx_ring;
+ dma_addr_t tx_ring_dma;
+
+ if (!(tx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
+ goto out_err;
+
+ memset(tx_ring, 0, size);
+ tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+
+ if (tx_ring_dma + size > B44_DMA_MASK) {
+ kfree(tx_ring);
+ goto out_err;
+ }
+
+ bp->tx_ring = tx_ring;
+ bp->tx_ring_dma = tx_ring_dma;
+ bp->flags |= B44_FLAG_TX_RING_HACK;
+ }
return 0;
@@ -1676,6 +1788,7 @@ static struct ethtool_ops b44_ethtool_ops = {
.set_pauseparam = b44_set_pauseparam,
.get_msglevel = b44_get_msglevel,
.set_msglevel = b44_set_msglevel,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1718,6 +1831,7 @@ static int __devinit b44_get_invariants(struct b44 *bp)
bp->dev->dev_addr[3] = eeprom[80];
bp->dev->dev_addr[4] = eeprom[83];
bp->dev->dev_addr[5] = eeprom[82];
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
bp->phy_addr = eeprom[90] & 0x1f;
@@ -1971,6 +2085,12 @@ static struct pci_driver b44_driver = {
static int __init b44_init(void)
{
+ unsigned int dma_desc_align_size = dma_get_cache_alignment();
+
+ /* Setup parameters for syncing RX/TX DMA descriptors */
+ dma_desc_align_mask = ~(dma_desc_align_size - 1);
+ dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
+
return pci_module_init(&b44_driver);
}
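The b44_init() hunk derives dma_desc_align_mask as ~(alignment - 1), so the offset & dma_desc_align_mask expression in the new sync helpers rounds a descriptor's offset down to the start of its cache line, while dma_desc_sync_size covers at least one full descriptor. The mask arithmetic as a stand-alone example (64 bytes is just an assumed cache-line size):

#include <stdio.h>

int main(void)
{
	unsigned long align = 64;		/* assumed cache-line size */
	unsigned long mask  = ~(align - 1);	/* clears the low log2(align) bits */
	unsigned long offsets[] = { 0, 40, 64, 100, 130 };
	int i;

	for (i = 0; i < 5; i++)
		printf("offset %3lu -> sync starts at %3lu\n",
		       offsets[i], offsets[i] & mask);
	return 0;
}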
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 11c40a2e71c..593cb0ad410 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -400,6 +400,8 @@ struct b44 {
#define B44_FLAG_ADV_100HALF 0x04000000
#define B44_FLAG_ADV_100FULL 0x08000000
#define B44_FLAG_INTERNAL_PHY 0x10000000
+#define B44_FLAG_RX_RING_HACK 0x20000000
+#define B44_FLAG_TX_RING_HACK 0x40000000
u32 rx_offset;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f264ff16297..8032126fd58 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4241,6 +4241,43 @@ out:
return 0;
}
+static void bond_activebackup_xmit_copy(struct sk_buff *skb,
+ struct bonding *bond,
+ struct slave *slave)
+{
+ struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
+ struct ethhdr *eth_data;
+ u8 *hwaddr;
+ int res;
+
+ if (!skb2) {
+ printk(KERN_ERR DRV_NAME ": Error: "
+ "bond_activebackup_xmit_copy(): skb_copy() failed\n");
+ return;
+ }
+
+ skb2->mac.raw = (unsigned char *)skb2->data;
+ eth_data = eth_hdr(skb2);
+
+ /* Pick an appropriate source MAC address
+ * -- use slave's perm MAC addr, unless used by bond
+ * -- otherwise, borrow active slave's perm MAC addr
+ * since that will not be used
+ */
+ hwaddr = slave->perm_hwaddr;
+ if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN))
+ hwaddr = bond->curr_active_slave->perm_hwaddr;
+
+ /* Set source MAC address appropriately */
+ memcpy(eth_data->h_source, hwaddr, ETH_ALEN);
+
+ res = bond_dev_queue_xmit(bond, skb2, slave->dev);
+ if (res)
+ dev_kfree_skb(skb2);
+
+ return;
+}
+
/*
* in active-backup mode, we know that bond->curr_active_slave is always valid if
* the bond has a usable interface.
@@ -4257,10 +4294,26 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
goto out;
}
- if (bond->curr_active_slave) { /* one usable interface */
- res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
+ if (!bond->curr_active_slave)
+ goto out;
+
+ /* Xmit IGMP frames on all slaves to ensure rapid fail-over
+ for multicast traffic on snooping switches */
+ if (skb->protocol == __constant_htons(ETH_P_IP) &&
+ skb->nh.iph->protocol == IPPROTO_IGMP) {
+ struct slave *slave, *active_slave;
+ int i;
+
+ active_slave = bond->curr_active_slave;
+ bond_for_each_slave_from_to(bond, slave, i, active_slave->next,
+ active_slave->prev)
+ if (IS_UP(slave->dev) &&
+ (slave->link == BOND_LINK_UP))
+ bond_activebackup_xmit_copy(skb, bond, slave);
}
+ res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
+
out:
if (res) {
/* no suitable interface, frame not sent */
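The active-backup transmit path above now duplicates IGMP frames to every up slave rather than only the active one: membership reports must be seen on whichever port a snooping switch will use after a failover, or multicast traffic stalls until the next report. The test that selects those frames, written out as a helper (2.6.14-era sk_buff layout, where the IP header is reached through skb->nh.iph; the helper itself is illustrative):

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>

static inline int demo_frame_is_igmp(const struct sk_buff *skb)
{
	return skb->protocol == __constant_htons(ETH_P_IP) &&
	       skb->nh.iph->protocol == IPPROTO_IGMP;
}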
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 2e617424d3f..50f43dbf31a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -489,7 +489,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
/* local page allocation routines for the receive buffers. jumbo pages
* require at least 8K contiguous and 8K aligned buffers.
*/
-static cas_page_t *cas_page_alloc(struct cas *cp, const int flags)
+static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
cas_page_t *page;
@@ -561,7 +561,7 @@ static void cas_spare_free(struct cas *cp)
}
/* replenish spares if needed */
-static void cas_spare_recover(struct cas *cp, const int flags)
+static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
struct list_head list, *elem, *tmp;
int needed, i;
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 521c83137bf..f130bdab3fd 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -5,7 +5,7 @@
*
* adopted from sunlance.c by Richard van den Berg
*
- * Copyright (C) 2002, 2003 Maciej W. Rozycki
+ * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki
*
* additional sources:
* - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
@@ -57,13 +57,15 @@
#include <linux/string.h>
#include <asm/addrspace.h>
+#include <asm/system.h>
+
#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/kn01.h>
#include <asm/dec/machtype.h>
+#include <asm/dec/system.h>
#include <asm/dec/tc.h>
-#include <asm/system.h>
static char version[] __devinitdata =
"declance.c: v0.009 by Linux MIPS DECstation task force\n";
@@ -79,10 +81,6 @@ MODULE_LICENSE("GPL");
#define PMAD_LANCE 2
#define PMAX_LANCE 3
-#ifndef CONFIG_TC
-unsigned long system_base;
-unsigned long dmaptr;
-#endif
#define LE_CSR0 0
#define LE_CSR1 1
@@ -237,7 +235,7 @@ struct lance_init_block {
/*
* This works *only* for the ring descriptors
*/
-#define LANCE_ADDR(x) (PHYSADDR(x) >> 1)
+#define LANCE_ADDR(x) (CPHYSADDR(x) >> 1)
struct lance_private {
struct net_device *next;
@@ -697,12 +695,13 @@ out:
spin_unlock(&lp->lock);
}
-static void lance_dma_merr_int(const int irq, void *dev_id,
- struct pt_regs *regs)
+static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id,
+ struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
printk("%s: DMA error\n", dev->name);
+ return IRQ_HANDLED;
}
static irqreturn_t
@@ -1026,10 +1025,6 @@ static int __init dec_lance_init(const int type, const int slot)
unsigned long esar_base;
unsigned char *esar;
-#ifndef CONFIG_TC
- system_base = KN01_LANCE_BASE;
-#endif
-
if (dec_lance_debug && version_printed++ == 0)
printk(version);
@@ -1062,16 +1057,16 @@ static int __init dec_lance_init(const int type, const int slot)
switch (type) {
#ifdef CONFIG_TC
case ASIC_LANCE:
- dev->base_addr = system_base + IOASIC_LANCE;
+ dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
/* buffer space for the on-board LANCE shared memory */
/*
* FIXME: ugly hack!
*/
- dev->mem_start = KSEG1ADDR(0x00020000);
+ dev->mem_start = CKSEG1ADDR(0x00020000);
dev->mem_end = dev->mem_start + 0x00020000;
dev->irq = dec_interrupt[DEC_IRQ_LANCE];
- esar_base = system_base + IOASIC_ESAR;
+ esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
/* Workaround crash with booting KN04 2.1k from Disk */
memset((void *)dev->mem_start, 0,
@@ -1101,14 +1096,14 @@ static int __init dec_lance_init(const int type, const int slot)
/* Setup I/O ASIC LANCE DMA. */
lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
ioasic_write(IO_REG_LANCE_DMA_P,
- PHYSADDR(dev->mem_start) << 3);
+ CPHYSADDR(dev->mem_start) << 3);
break;
case PMAD_LANCE:
claim_tc_card(slot);
- dev->mem_start = get_tc_base_addr(slot);
+ dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot));
dev->base_addr = dev->mem_start + 0x100000;
dev->irq = get_tc_irq_nr(slot);
esar_base = dev->mem_start + 0x1c0002;
@@ -1137,9 +1132,9 @@ static int __init dec_lance_init(const int type, const int slot)
case PMAX_LANCE:
dev->irq = dec_interrupt[DEC_IRQ_LANCE];
- dev->base_addr = KN01_LANCE_BASE;
- dev->mem_start = KN01_LANCE_BASE + 0x01000000;
- esar_base = KN01_RTC_BASE + 1;
+ dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
+ dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
+ esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
lp->dma_irq = -1;
/*
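The declance changes above switch from PHYSADDR/KSEG1ADDR to the 64-bit-safe CPHYSADDR/CKSEG1ADDR macros and take the I/O ASIC base from dec_kn_slot_base instead of a private system_base. The underlying MIPS convention, with the constants assumed here for illustration rather than taken from the kernel headers, is that the low 29 bits select the physical address and KSEG1 is the unmapped, uncached window at 0xA0000000:

#include <stdio.h>

#define DEMO_CPHYSADDR(a)	((unsigned long)(a) & 0x1fffffffUL)	/* strip segment bits */
#define DEMO_CKSEG1ADDR(a)	(DEMO_CPHYSADDR(a) | 0xa0000000UL)	/* map into uncached KSEG1 */

int main(void)
{
	unsigned long regs = 0x18000000UL;	/* hypothetical on-board device base */

	printf("physical 0x%08lx, uncached 0x%08lx\n",
	       DEMO_CPHYSADDR(regs), DEMO_CKSEG1ADDR(regs));
	return 0;
}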
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 40887f09b68..eb169a8e877 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2201,6 +2201,7 @@ static struct ethtool_ops e100_ethtool_ops = {
.phys_id = e100_phys_id,
.get_stats_count = e100_get_stats_count,
.get_ethtool_stats = e100_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
@@ -2351,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
e100_phy_init(nic);
memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
- if(!is_valid_ether_addr(netdev->dev_addr)) {
+ memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
+ if(!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "Invalid MAC address from "
"EEPROM, aborting.\n");
err = -EAGAIN;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 092757bc721..3f653a93e1b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -72,6 +72,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
#define BAR_0 0
#define BAR_1 1
@@ -165,10 +169,33 @@ struct e1000_buffer {
uint16_t next_to_watch;
};
-struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
-struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
+struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
+
+struct e1000_tx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ struct e1000_buffer previous_buffer_info;
+ spinlock_t tx_lock;
+ uint16_t tdh;
+ uint16_t tdt;
+ uint64_t pkt;
+};
-struct e1000_desc_ring {
+struct e1000_rx_ring {
/* pointer to the descriptor ring memory */
void *desc;
/* physical address of the descriptor ring */
@@ -186,6 +213,10 @@ struct e1000_desc_ring {
/* arrays of page information for packet split */
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
+
+ uint16_t rdh;
+ uint16_t rdt;
+ uint64_t pkt;
};
#define E1000_DESC_UNUSED(R) \
@@ -227,9 +258,10 @@ struct e1000_adapter {
unsigned long led_status;
/* TX */
- struct e1000_desc_ring tx_ring;
- struct e1000_buffer previous_buffer_info;
- spinlock_t tx_lock;
+ struct e1000_tx_ring *tx_ring; /* One per active queue */
+#ifdef CONFIG_E1000_MQ
+ struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
+#endif
uint32_t txd_cmd;
uint32_t tx_int_delay;
uint32_t tx_abs_int_delay;
@@ -246,19 +278,33 @@ struct e1000_adapter {
/* RX */
#ifdef CONFIG_E1000_NAPI
- boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done,
- int work_to_do);
+ boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
#else
- boolean_t (*clean_rx) (struct e1000_adapter *adapter);
+ boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
#endif
- void (*alloc_rx_buf) (struct e1000_adapter *adapter);
- struct e1000_desc_ring rx_ring;
+ void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+ struct e1000_rx_ring *rx_ring; /* One per active queue */
+#ifdef CONFIG_E1000_NAPI
+ struct net_device *polling_netdev; /* One per active queue */
+#endif
+#ifdef CONFIG_E1000_MQ
+ struct net_device **cpu_netdev; /* per-cpu */
+ struct call_async_data_struct rx_sched_call_data;
+ int cpu_for_queue[4];
+#endif
+ int num_queues;
+
uint64_t hw_csum_err;
uint64_t hw_csum_good;
+ uint64_t rx_hdr_split;
uint32_t rx_int_delay;
uint32_t rx_abs_int_delay;
boolean_t rx_csum;
- boolean_t rx_ps;
+ unsigned int rx_ps_pages;
uint32_t gorcl;
uint64_t gorcl_old;
uint16_t rx_ps_bsize0;
@@ -278,8 +324,8 @@ struct e1000_adapter {
struct e1000_phy_stats phy_stats;
uint32_t test_icr;
- struct e1000_desc_ring test_tx_ring;
- struct e1000_desc_ring test_rx_ring;
+ struct e1000_tx_ring test_tx_ring;
+ struct e1000_rx_ring test_rx_ring;
int msg_enable;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index f133ff0b0b9..6b9acc7f94a 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -39,10 +39,10 @@ extern int e1000_up(struct e1000_adapter *adapter);
extern void e1000_down(struct e1000_adapter *adapter);
extern void e1000_reset(struct e1000_adapter *adapter);
extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
-extern int e1000_setup_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_tx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_update_stats(struct e1000_adapter *adapter);
struct e1000_stats {
@@ -91,7 +91,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
- { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }
+ { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+ { "rx_header_split", E1000_STAT(rx_hdr_split) },
};
#define E1000_STATS_LEN \
sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
@@ -546,8 +547,10 @@ e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000_write_eeprom(hw, first_word,
last_word - first_word + 1, eeprom_buff);
- /* Update the checksum over the first part of the EEPROM if needed */
- if((ret_val == 0) && first_word <= EEPROM_CHECKSUM_REG)
+ /* Update the checksum over the first part of the EEPROM if needed
+ * and flush shadow RAM for 82573 controllers */
+ if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
+ (hw->mac_type == e1000_82573)))
e1000_update_eeprom_checksum(hw);
kfree(eeprom_buff);
@@ -576,8 +579,8 @@ e1000_get_ringparam(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
e1000_mac_type mac_type = adapter->hw.mac_type;
- struct e1000_desc_ring *txdr = &adapter->tx_ring;
- struct e1000_desc_ring *rxdr = &adapter->rx_ring;
+ struct e1000_tx_ring *txdr = adapter->tx_ring;
+ struct e1000_rx_ring *rxdr = adapter->rx_ring;
ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
E1000_MAX_82544_RXD;
@@ -597,20 +600,40 @@ e1000_set_ringparam(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
e1000_mac_type mac_type = adapter->hw.mac_type;
- struct e1000_desc_ring *txdr = &adapter->tx_ring;
- struct e1000_desc_ring *rxdr = &adapter->rx_ring;
- struct e1000_desc_ring tx_old, tx_new, rx_old, rx_new;
- int err;
+ struct e1000_tx_ring *txdr, *tx_old, *tx_new;
+ struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
+ int i, err, tx_ring_size, rx_ring_size;
+
+ tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+ rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+
+ if (netif_running(adapter->netdev))
+ e1000_down(adapter);
tx_old = adapter->tx_ring;
rx_old = adapter->rx_ring;
+ adapter->tx_ring = kmalloc(tx_ring_size, GFP_KERNEL);
+ if (!adapter->tx_ring) {
+ err = -ENOMEM;
+ goto err_setup_rx;
+ }
+ memset(adapter->tx_ring, 0, tx_ring_size);
+
+ adapter->rx_ring = kmalloc(rx_ring_size, GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ err = -ENOMEM;
+ goto err_setup_rx;
+ }
+ memset(adapter->rx_ring, 0, rx_ring_size);
+
+ txdr = adapter->tx_ring;
+ rxdr = adapter->rx_ring;
+
if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- if(netif_running(adapter->netdev))
- e1000_down(adapter);
-
rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
E1000_MAX_RXD : E1000_MAX_82544_RXD));
@@ -621,11 +644,16 @@ e1000_set_ringparam(struct net_device *netdev,
E1000_MAX_TXD : E1000_MAX_82544_TXD));
E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+ for (i = 0; i < adapter->num_queues; i++) {
+ txdr[i].count = txdr->count;
+ rxdr[i].count = rxdr->count;
+ }
+
if(netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
- if((err = e1000_setup_rx_resources(adapter)))
+ if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
- if((err = e1000_setup_tx_resources(adapter)))
+ if ((err = e1000_setup_all_tx_resources(adapter)))
goto err_setup_tx;
/* save the new, restore the old in order to free it,
@@ -635,8 +663,10 @@ e1000_set_ringparam(struct net_device *netdev,
tx_new = adapter->tx_ring;
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
- e1000_free_rx_resources(adapter);
- e1000_free_tx_resources(adapter);
+ e1000_free_all_rx_resources(adapter);
+ e1000_free_all_tx_resources(adapter);
+ kfree(tx_old);
+ kfree(rx_old);
adapter->rx_ring = rx_new;
adapter->tx_ring = tx_new;
if((err = e1000_up(adapter)))
@@ -645,7 +675,7 @@ e1000_set_ringparam(struct net_device *netdev,
return 0;
err_setup_tx:
- e1000_free_rx_resources(adapter);
+ e1000_free_all_rx_resources(adapter);
err_setup_rx:
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
@@ -696,6 +726,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
* Some bits that get toggled are ignored.
*/
switch (adapter->hw.mac_type) {
+ /* there are several bits on newer hardware that are r/w */
+ case e1000_82571:
+ case e1000_82572:
+ toggle = 0x7FFFF3FF;
+ break;
case e1000_82573:
toggle = 0x7FFFF033;
break;
@@ -898,8 +933,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
static void
e1000_free_desc_rings(struct e1000_adapter *adapter)
{
- struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
- struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
int i;
@@ -941,8 +976,8 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
static int
e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
- struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
- struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
uint32_t rctl;
int size, i, ret_val;
@@ -1245,6 +1280,8 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
case e1000_82541_rev_2:
case e1000_82547:
case e1000_82547_rev_2:
+ case e1000_82571:
+ case e1000_82572:
case e1000_82573:
return e1000_integrated_phy_loopback(adapter);
break;
@@ -1340,8 +1377,8 @@ e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
static int
e1000_run_loopback_test(struct e1000_adapter *adapter)
{
- struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
- struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
int i, j, k, l, lc, good_cnt, ret_val=0;
unsigned long time;
@@ -1509,6 +1546,7 @@ e1000_diag_test(struct net_device *netdev,
data[2] = 0;
data[3] = 0;
}
+ msleep_interruptible(4 * 1000);
}
static void
@@ -1625,7 +1663,7 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
- if(adapter->hw.mac_type < e1000_82573) {
+ if(adapter->hw.mac_type < e1000_82571) {
if(!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer);
adapter->blink_timer.function = e1000_led_blink_callback;
@@ -1739,6 +1777,7 @@ struct ethtool_ops e1000_ethtool_ops = {
.phys_id = e1000_phys_id,
.get_stats_count = e1000_get_stats_count,
.get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 045f5426ab9..8fc876da43b 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -83,14 +83,14 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
static const
uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
- { 8, 13, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43,
- 22, 24, 27, 30, 32, 35, 37, 40, 42, 44, 47, 49, 51, 54, 56, 58,
- 32, 35, 38, 41, 44, 47, 50, 53, 55, 58, 61, 63, 66, 69, 71, 74,
- 43, 47, 51, 54, 58, 61, 64, 67, 71, 74, 77, 80, 82, 85, 88, 90,
- 57, 62, 66, 70, 74, 77, 81, 85, 88, 91, 94, 97, 100, 103, 106, 108,
- 73, 78, 82, 87, 91, 95, 98, 102, 105, 109, 112, 114, 117, 119, 122, 124,
- 91, 96, 101, 105, 109, 113, 116, 119, 122, 125, 127, 128, 128, 128, 128, 128,
- 108, 113, 117, 121, 124, 127, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128};
+ { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+ 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+ 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+ 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+ 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+ 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+ 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+ 104, 109, 114, 118, 121, 124};
/******************************************************************************
@@ -286,7 +286,6 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82546GB_SERDES:
case E1000_DEV_ID_82546GB_PCIE:
- case E1000_DEV_ID_82546GB_QUAD_COPPER:
hw->mac_type = e1000_82546_rev_3;
break;
case E1000_DEV_ID_82541EI:
@@ -305,8 +304,19 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82547GI:
hw->mac_type = e1000_82547_rev_2;
break;
+ case E1000_DEV_ID_82571EB_COPPER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82571EB_SERDES:
+ hw->mac_type = e1000_82571;
+ break;
+ case E1000_DEV_ID_82572EI_COPPER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82572EI_SERDES:
+ hw->mac_type = e1000_82572;
+ break;
case E1000_DEV_ID_82573E:
case E1000_DEV_ID_82573E_IAMT:
+ case E1000_DEV_ID_82573L:
hw->mac_type = e1000_82573;
break;
default:
@@ -315,6 +325,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
}
switch(hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
case e1000_82573:
hw->eeprom_semaphore_present = TRUE;
/* fall through */
@@ -351,6 +363,8 @@ e1000_set_media_type(struct e1000_hw *hw)
switch (hw->device_id) {
case E1000_DEV_ID_82545GM_SERDES:
case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82572EI_SERDES:
hw->media_type = e1000_media_type_internal_serdes;
break;
default:
@@ -523,6 +537,8 @@ e1000_reset_hw(struct e1000_hw *hw)
E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
E1000_WRITE_FLUSH(hw);
/* fall through */
+ case e1000_82571:
+ case e1000_82572:
ret_val = e1000_get_auto_rd_done(hw);
if(ret_val)
/* We don't want to continue accessing MAC registers. */
@@ -683,6 +699,9 @@ e1000_init_hw(struct e1000_hw *hw)
switch (hw->mac_type) {
default:
break;
+ case e1000_82571:
+ case e1000_82572:
+ ctrl |= (1 << 22);
case e1000_82573:
ctrl |= E1000_TXDCTL_COUNT_DESC;
break;
@@ -694,6 +713,26 @@ e1000_init_hw(struct e1000_hw *hw)
e1000_enable_tx_pkt_filtering(hw);
}
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ ctrl = E1000_READ_REG(hw, TXDCTL1);
+ ctrl &= ~E1000_TXDCTL_WTHRESH;
+ ctrl |= E1000_TXDCTL_COUNT_DESC | E1000_TXDCTL_FULL_TX_DESC_WB;
+ ctrl |= (1 << 22);
+ E1000_WRITE_REG(hw, TXDCTL1, ctrl);
+ break;
+ }
+
+
+
+ if (hw->mac_type == e1000_82573) {
+ uint32_t gcr = E1000_READ_REG(hw, GCR);
+ gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+ E1000_WRITE_REG(hw, GCR, gcr);
+ }
/* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
@@ -878,6 +917,14 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
DEBUGFUNC("e1000_setup_fiber_serdes_link");
+ /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
+ * until explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
+ */
+ if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
+ E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+
/* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be
* set when the optics detect a signal. On older adapters, it will be
* cleared when there is a signal. This applies to fiber media only.
@@ -2943,6 +2990,8 @@ e1000_phy_reset(struct e1000_hw *hw)
switch (hw->mac_type) {
case e1000_82541_rev_2:
+ case e1000_82571:
+ case e1000_82572:
ret_val = e1000_phy_hw_reset(hw);
if(ret_val)
return ret_val;
@@ -2981,6 +3030,16 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
DEBUGFUNC("e1000_detect_gig_phy");
+ /* The 82571 firmware may still be configuring the PHY. In this
+ * case, we cannot access the PHY until the configuration is done. So
+ * we explicitly set the PHY values. */
+ if(hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572) {
+ hw->phy_id = IGP01E1000_I_PHY_ID;
+ hw->phy_type = e1000_phy_igp_2;
+ return E1000_SUCCESS;
+ }
+
/* Read the PHY ID Registers to identify which PHY is onboard. */
ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
if(ret_val)
@@ -3334,6 +3393,21 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom->use_eerd = FALSE;
eeprom->use_eewr = FALSE;
break;
+ case e1000_82571:
+ case e1000_82572:
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ eeprom->use_eerd = FALSE;
+ eeprom->use_eewr = FALSE;
+ break;
case e1000_82573:
eeprom->type = e1000_eeprom_spi;
eeprom->opcode_bits = 8;
@@ -3543,25 +3617,26 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
eecd = E1000_READ_REG(hw, EECD);
if (hw->mac_type != e1000_82573) {
- /* Request EEPROM Access */
- if(hw->mac_type > e1000_82544) {
- eecd |= E1000_EECD_REQ;
- E1000_WRITE_REG(hw, EECD, eecd);
- eecd = E1000_READ_REG(hw, EECD);
- while((!(eecd & E1000_EECD_GNT)) &&
- (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
- i++;
- udelay(5);
- eecd = E1000_READ_REG(hw, EECD);
- }
- if(!(eecd & E1000_EECD_GNT)) {
- eecd &= ~E1000_EECD_REQ;
+ /* Request EEPROM Access */
+ if(hw->mac_type > e1000_82544) {
+ eecd |= E1000_EECD_REQ;
E1000_WRITE_REG(hw, EECD, eecd);
- DEBUGOUT("Could not acquire EEPROM grant\n");
- return -E1000_ERR_EEPROM;
+ eecd = E1000_READ_REG(hw, EECD);
+ while((!(eecd & E1000_EECD_GNT)) &&
+ (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+ i++;
+ udelay(5);
+ eecd = E1000_READ_REG(hw, EECD);
+ }
+ if(!(eecd & E1000_EECD_GNT)) {
+ eecd &= ~E1000_EECD_REQ;
+ E1000_WRITE_REG(hw, EECD, eecd);
+ DEBUGOUT("Could not acquire EEPROM grant\n");
+ e1000_put_hw_eeprom_semaphore(hw);
+ return -E1000_ERR_EEPROM;
+ }
}
}
- }
/* Setup EEPROM for Read/Write */
@@ -4064,7 +4139,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
return -E1000_ERR_EEPROM;
}
- /* 82573 reads only through eerd */
+ /* 82573 writes only through eewr */
if(eeprom->use_eewr == TRUE)
return e1000_write_eeprom_eewr(hw, offset, words, data);
@@ -4353,9 +4428,16 @@ e1000_read_mac_addr(struct e1000_hw * hw)
hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
}
- if(((hw->mac_type == e1000_82546) || (hw->mac_type == e1000_82546_rev_3)) &&
- (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1))
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82571:
+ if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
hw->perm_mac_addr[5] ^= 0x01;
+ break;
+ }
for(i = 0; i < NODE_ADDRESS_SIZE; i++)
hw->mac_addr[i] = hw->perm_mac_addr[i];
@@ -4385,6 +4467,12 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
e1000_rar_set(hw, hw->mac_addr, 0);
rar_num = E1000_RAR_ENTRIES;
+
+ /* Reserve a spot for the Locally Administered Address to work around
+ * an 82571 issue in which a reset on one port will reload the MAC on
+ * the other port. */
+ if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+ rar_num -= 1;
/* Zero out the other 15 receive addresses. */
DEBUGOUT("Clearing RAR[1-15]\n");
for(i = 1; i < rar_num; i++) {
@@ -4427,6 +4515,12 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
/* Clear RAR[1-15] */
DEBUGOUT(" Clearing RAR[1-15]\n");
num_rar_entry = E1000_RAR_ENTRIES;
+ /* Reserve a spot for the Locally Administered Address to work around
+ * an 82571 issue in which a reset on one port will reload the MAC on
+ * the other port. */
+ if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+ num_rar_entry -= 1;
+
for(i = rar_used_count; i < num_rar_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -4984,7 +5078,6 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
temp = E1000_READ_REG(hw, ICTXQEC);
temp = E1000_READ_REG(hw, ICTXQMTC);
temp = E1000_READ_REG(hw, ICRXDMTC);
-
}
/******************************************************************************
@@ -5151,6 +5244,8 @@ e1000_get_bus_info(struct e1000_hw *hw)
hw->bus_speed = e1000_bus_speed_unknown;
hw->bus_width = e1000_bus_width_unknown;
break;
+ case e1000_82571:
+ case e1000_82572:
case e1000_82573:
hw->bus_type = e1000_bus_type_pci_express;
hw->bus_speed = e1000_bus_speed_2500;
@@ -5250,6 +5345,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
int32_t ret_val;
uint16_t agc_value = 0;
uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ uint16_t max_agc = 0;
uint16_t i, phy_data;
uint16_t cable_length;
@@ -5338,6 +5434,40 @@ e1000_get_cable_length(struct e1000_hw *hw,
IGP01E1000_AGC_RANGE) : 0;
*max_length = e1000_igp_cable_length_table[agc_value] +
IGP01E1000_AGC_RANGE;
+ } else if (hw->phy_type == e1000_phy_igp_2) {
+ uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+ {IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D};
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Getting bits 15:9, which represent the combination of coarse and
+ * fine gain values. The result is a number that can be put into
+ * the lookup table to obtain the approximate cable length. */
+ cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc])
+ min_agc = cur_agc;
+ if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc])
+ max_agc = cur_agc;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc];
+ }
+
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ *max_length = agc_value + IGP02E1000_AGC_RANGE;
}
return E1000_SUCCESS;
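The new e1000_phy_igp_2 branch above estimates cable length by reading the AGC value for each of the four channels, discarding the shortest and longest table entries, averaging the remaining two, and then widening the result by IGP02E1000_AGC_RANGE (15 m per the header change below). The same arithmetic as a stand-alone example, using made-up per-channel lengths:

#include <stdio.h>

#define DEMO_AGC_RANGE	15	/* metres, per the new IGP02E1000_AGC_RANGE */

int main(void)
{
	/* Hypothetical lengths already looked up from the cable-length table */
	unsigned int chan[4] = { 40, 44, 51, 61 };
	unsigned int sum = 0, min = chan[0], max = chan[0], agc;
	int i;

	for (i = 0; i < 4; i++) {
		sum += chan[i];
		if (chan[i] < min)
			min = chan[i];
		if (chan[i] > max)
			max = chan[i];
	}

	agc = (sum - min - max) / 2;	/* drop extremes, average the middle two */

	printf("estimated %u m (range %u..%u m)\n", agc,
	       agc > DEMO_AGC_RANGE ? agc - DEMO_AGC_RANGE : 0,
	       agc + DEMO_AGC_RANGE);
	return 0;
}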
@@ -6465,6 +6595,8 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
default:
msec_delay(5);
break;
+ case e1000_82571:
+ case e1000_82572:
case e1000_82573:
while(timeout) {
if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break;
@@ -6494,10 +6626,31 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
int32_t
e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
+ int32_t timeout = PHY_CFG_TIMEOUT;
+ uint32_t cfg_mask = E1000_EEPROM_CFG_DONE;
+
DEBUGFUNC("e1000_get_phy_cfg_done");
- /* Simply wait for 10ms */
- msec_delay(10);
+ switch (hw->mac_type) {
+ default:
+ msec_delay(10);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ while (timeout) {
+ if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask)
+ break;
+ else
+ msec_delay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+ break;
+ }
return E1000_SUCCESS;
}
@@ -6569,8 +6722,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
return;
swsm = E1000_READ_REG(hw, SWSM);
- /* Release both semaphores. */
- swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+ swsm &= ~(E1000_SWSM_SWESMBI);
E1000_WRITE_REG(hw, SWSM, swsm);
}
@@ -6606,6 +6758,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
* if this is the case. We read FWSM to determine the manageability mode.
*/
switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
case e1000_82573:
fwsm = E1000_READ_REG(hw, FWSM);
if((fwsm & E1000_FWSM_MODE_MASK) != 0)
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 51c2b3a18b6..4f2c196dc31 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -57,6 +57,8 @@ typedef enum {
e1000_82541_rev_2,
e1000_82547,
e1000_82547_rev_2,
+ e1000_82571,
+ e1000_82572,
e1000_82573,
e1000_num_macs
} e1000_mac_type;
@@ -478,10 +480,16 @@ uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
#define E1000_DEV_ID_82546GB_SERDES 0x107B
#define E1000_DEV_ID_82546GB_PCIE 0x108A
#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
#define E1000_DEV_ID_82573E 0x108B
#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
-#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
@@ -833,6 +841,8 @@ struct e1000_ffvt_entry {
#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_DISABLE_SERDES_LOOPBACK 0x0400
+
/* Register Set. (82543, 82544)
*
* Registers are defined to be 32 bits and should be accessed as 32 bit values.
@@ -853,6 +863,7 @@ struct e1000_ffvt_entry {
#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
@@ -864,6 +875,12 @@ struct e1000_ffvt_entry {
#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
#define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
@@ -895,6 +912,12 @@ struct e1000_ffvt_entry {
#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
+#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
+#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */
#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
@@ -980,15 +1003,15 @@ struct e1000_ffvt_entry {
#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
-#define E1000_IAC 0x4100 /* Interrupt Assertion Count */
-#define E1000_ICRXPTC 0x4104 /* Interrupt Cause Rx Packet Timer Expire Count */
-#define E1000_ICRXATC 0x4108 /* Interrupt Cause Rx Absolute Timer Expire Count */
-#define E1000_ICTXPTC 0x410C /* Interrupt Cause Tx Packet Timer Expire Count */
-#define E1000_ICTXATC 0x4110 /* Interrupt Cause Tx Absolute Timer Expire Count */
-#define E1000_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */
-#define E1000_ICTXQMTC 0x411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
-#define E1000_ICRXDMTC 0x4120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
-#define E1000_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
@@ -1018,6 +1041,14 @@ struct e1000_ffvt_entry {
#define E1000_FWSM 0x05B54 /* FW Semaphore */
#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
/* Register Set (82542)
*
* Some of the 82542 registers are located at different offsets than they are
@@ -1032,6 +1063,7 @@ struct e1000_ffvt_entry {
#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
#define E1000_82542_FLA E1000_FLA
#define E1000_82542_MDIC E1000_MDIC
+#define E1000_82542_SCTL E1000_SCTL
#define E1000_82542_FCAL E1000_FCAL
#define E1000_82542_FCAH E1000_FCAH
#define E1000_82542_FCT E1000_FCT
@@ -1049,6 +1081,18 @@ struct e1000_ffvt_entry {
#define E1000_82542_RDLEN 0x00118
#define E1000_82542_RDH 0x00120
#define E1000_82542_RDT 0x00128
+#define E1000_82542_RDTR0 E1000_82542_RDTR
+#define E1000_82542_RDBAL0 E1000_82542_RDBAL
+#define E1000_82542_RDBAH0 E1000_82542_RDBAH
+#define E1000_82542_RDLEN0 E1000_82542_RDLEN
+#define E1000_82542_RDH0 E1000_82542_RDH
+#define E1000_82542_RDT0 E1000_82542_RDT
+#define E1000_82542_RDTR1 0x00130
+#define E1000_82542_RDBAL1 0x00138
+#define E1000_82542_RDBAH1 0x0013C
+#define E1000_82542_RDLEN1 0x00140
+#define E1000_82542_RDH1 0x00148
+#define E1000_82542_RDT1 0x00150
#define E1000_82542_FCRTH 0x00160
#define E1000_82542_FCRTL 0x00168
#define E1000_82542_FCTTV E1000_FCTTV
@@ -1197,6 +1241,13 @@ struct e1000_ffvt_entry {
#define E1000_82542_ICRXOC E1000_ICRXOC
#define E1000_82542_HICR E1000_HICR
+#define E1000_82542_CPUVEC E1000_CPUVEC
+#define E1000_82542_MRQC E1000_MRQC
+#define E1000_82542_RETA E1000_RETA
+#define E1000_82542_RSSRK E1000_RSSRK
+#define E1000_82542_RSSIM E1000_RSSIM
+#define E1000_82542_RSSIR E1000_RSSIR
+
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
uint64_t crcerrs;
@@ -1336,6 +1387,7 @@ struct e1000_hw {
boolean_t serdes_link_down;
boolean_t tbi_compatibility_en;
boolean_t tbi_compatibility_on;
+ boolean_t laa_is_present;
boolean_t phy_reset_disable;
boolean_t fc_send_xon;
boolean_t fc_strict_ieee;
@@ -1374,6 +1426,7 @@ struct e1000_hw {
#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
@@ -1491,6 +1544,8 @@ struct e1000_hw {
#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
+#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
@@ -1524,6 +1579,7 @@ struct e1000_hw {
#define E1000_LEDCTL_LED2_BLINK 0x00800000
#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
#define E1000_LEDCTL_LED3_MODE_SHIFT 24
+#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000
#define E1000_LEDCTL_LED3_IVRT 0x40000000
#define E1000_LEDCTL_LED3_BLINK 0x80000000
@@ -1784,6 +1840,16 @@ struct e1000_hw {
#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK 0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
@@ -1928,6 +1994,7 @@ struct e1000_host_command_info {
#define E1000_MDALIGN 4096
#define E1000_GCR_BEM32 0x00400000
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
/* Function Active and Power State to MNG */
#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
#define E1000_FACTPS_LAN0_VALID 0x00000004
@@ -1980,6 +2047,7 @@ struct e1000_host_command_info {
/* EEPROM Word Offsets */
#define EEPROM_COMPAT 0x0003
#define EEPROM_ID_LED_SETTINGS 0x0004
+#define EEPROM_VERSION 0x0005
#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
#define EEPROM_PHY_CLASS_WORD 0x0007
#define EEPROM_INIT_CONTROL1_REG 0x000A
@@ -1990,6 +2058,8 @@ struct e1000_host_command_info {
#define EEPROM_FLASH_VERSION 0x0032
#define EEPROM_CHECKSUM_REG 0x003F
+#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
+
/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000 0x0000
#define ID_LED_RESERVED_FFFF 0xFFFF
@@ -2108,6 +2178,8 @@ struct e1000_host_command_info {
#define E1000_PBA_22K 0x0016
#define E1000_PBA_24K 0x0018
#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_38K 0x0026
#define E1000_PBA_40K 0x0028
#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
@@ -2592,11 +2664,11 @@ struct e1000_host_command_info {
/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */
#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
-#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
/* The precision error of the cable length is +/- 10 meters */
#define IGP01E1000_AGC_RANGE 10
-#define IGP02E1000_AGC_RANGE 10
+#define IGP02E1000_AGC_RANGE 15
/* IGP01E1000 PCS Initialization register */
/* bits 3:6 in the PCS registers stores the channels polarity */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ee687c902a2..6b72f6acdd5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -43,7 +43,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "6.0.60-k2"DRIVERNAPI
+#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
@@ -80,6 +80,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x1026),
INTEL_E1000_ETHERNET_DEVICE(0x1027),
INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x105E),
+ INTEL_E1000_ETHERNET_DEVICE(0x105F),
+ INTEL_E1000_ETHERNET_DEVICE(0x1060),
INTEL_E1000_ETHERNET_DEVICE(0x1075),
INTEL_E1000_ETHERNET_DEVICE(0x1076),
INTEL_E1000_ETHERNET_DEVICE(0x1077),
@@ -88,10 +91,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x107A),
INTEL_E1000_ETHERNET_DEVICE(0x107B),
INTEL_E1000_ETHERNET_DEVICE(0x107C),
+ INTEL_E1000_ETHERNET_DEVICE(0x107D),
+ INTEL_E1000_ETHERNET_DEVICE(0x107E),
+ INTEL_E1000_ETHERNET_DEVICE(0x107F),
INTEL_E1000_ETHERNET_DEVICE(0x108A),
INTEL_E1000_ETHERNET_DEVICE(0x108B),
INTEL_E1000_ETHERNET_DEVICE(0x108C),
- INTEL_E1000_ETHERNET_DEVICE(0x1099),
+ INTEL_E1000_ETHERNET_DEVICE(0x109A),
/* required last entry */
{0,}
};
@@ -102,10 +108,18 @@ int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
-int e1000_setup_tx_resources(struct e1000_adapter *adapter);
-int e1000_setup_rx_resources(struct e1000_adapter *adapter);
-void e1000_free_tx_resources(struct e1000_adapter *adapter);
-void e1000_free_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr);
+int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr);
+void e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+void e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);
/* Local Function Prototypes */
@@ -114,14 +128,22 @@ static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+#ifdef CONFIG_E1000_MQ
+static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
+#endif
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
-static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
-static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
@@ -132,19 +154,26 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
-static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
-static int e1000_clean(struct net_device *netdev, int *budget);
+static int e1000_clean(struct net_device *poll_dev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
#else
-static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
-static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
#endif
-static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
@@ -172,6 +201,11 @@ static int e1000_resume(struct pci_dev *pdev);
static void e1000_netpoll (struct net_device *netdev);
#endif
+#ifdef CONFIG_E1000_MQ
+/* for multiple Rx queues */
+void e1000_rx_schedule(void *data);
+#endif
+
/* Exported from other modules */
extern void e1000_check_options(struct e1000_adapter *adapter);
@@ -289,7 +323,7 @@ int
e1000_up(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int err;
+ int i, err;
/* hardware has been reset, we need to reload some things */
@@ -308,7 +342,8 @@ e1000_up(struct e1000_adapter *adapter)
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
e1000_configure_rx(adapter);
- adapter->alloc_rx_buf(adapter);
+ for (i = 0; i < adapter->num_queues; i++)
+ adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
#ifdef CONFIG_PCI_MSI
if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -344,6 +379,9 @@ e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
e1000_irq_disable(adapter);
+#ifdef CONFIG_E1000_MQ
+ while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
+#endif
free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
if(adapter->hw.mac_type > e1000_82547_rev_2 &&
@@ -363,11 +401,10 @@ e1000_down(struct e1000_adapter *adapter)
netif_stop_queue(netdev);
e1000_reset(adapter);
- e1000_clean_tx_ring(adapter);
- e1000_clean_rx_ring(adapter);
+ e1000_clean_all_tx_rings(adapter);
+ e1000_clean_all_rx_rings(adapter);
- /* If WoL is not enabled
- * and management mode is not IAMT
+ /* If WoL is not enabled and management mode is not IAMT
* Power down the PHY so no link is implied when interface is down */
if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
adapter->hw.media_type == e1000_media_type_copper &&
@@ -398,6 +435,10 @@ e1000_reset(struct e1000_adapter *adapter)
case e1000_82547_rev_2:
pba = E1000_PBA_30K;
break;
+ case e1000_82571:
+ case e1000_82572:
+ pba = E1000_PBA_38K;
+ break;
case e1000_82573:
pba = E1000_PBA_12K;
break;
@@ -475,6 +516,7 @@ e1000_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct e1000_adapter *adapter;
unsigned long mmio_start, mmio_len;
+ uint32_t ctrl_ext;
uint32_t swsm;
static int cards_found = 0;
@@ -614,8 +656,9 @@ e1000_probe(struct pci_dev *pdev,
if(e1000_read_mac_addr(&adapter->hw))
DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- if(!is_valid_ether_addr(netdev->dev_addr)) {
+ if(!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
@@ -687,6 +730,12 @@ e1000_probe(struct pci_dev *pdev,
/* Let firmware know the driver has taken over */
switch(adapter->hw.mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ break;
case e1000_82573:
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -731,7 +780,11 @@ e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ uint32_t ctrl_ext;
uint32_t manc, swsm;
+#ifdef CONFIG_E1000_NAPI
+ int i;
+#endif
flush_scheduled_work();
@@ -745,6 +798,12 @@ e1000_remove(struct pci_dev *pdev)
}
switch(adapter->hw.mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ break;
case e1000_82573:
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -756,13 +815,27 @@ e1000_remove(struct pci_dev *pdev)
}
unregister_netdev(netdev);
+#ifdef CONFIG_E1000_NAPI
+ for (i = 0; i < adapter->num_queues; i++)
+ __dev_put(&adapter->polling_netdev[i]);
+#endif
if(!e1000_check_phy_reset_block(&adapter->hw))
e1000_phy_hw_reset(&adapter->hw);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+#ifdef CONFIG_E1000_NAPI
+ kfree(adapter->polling_netdev);
+#endif
+
iounmap(adapter->hw.hw_addr);
pci_release_regions(pdev);
+#ifdef CONFIG_E1000_MQ
+ free_percpu(adapter->cpu_netdev);
+ free_percpu(adapter->cpu_tx_ring);
+#endif
free_netdev(netdev);
pci_disable_device(pdev);
@@ -783,6 +856,9 @@ e1000_sw_init(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+#ifdef CONFIG_E1000_NAPI
+ int i;
+#endif
/* PCI config space info */
@@ -840,14 +916,123 @@ e1000_sw_init(struct e1000_adapter *adapter)
hw->master_slave = E1000_MASTER_SLAVE;
}
+#ifdef CONFIG_E1000_MQ
+ /* Number of supported queues */
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ adapter->num_queues = 2;
+ break;
+ default:
+ adapter->num_queues = 1;
+ break;
+ }
+ adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+#else
+ adapter->num_queues = 1;
+#endif
+
+ if (e1000_alloc_queues(adapter)) {
+ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_E1000_NAPI
+ for (i = 0; i < adapter->num_queues; i++) {
+ adapter->polling_netdev[i].priv = adapter;
+ adapter->polling_netdev[i].poll = &e1000_clean;
+ adapter->polling_netdev[i].weight = 64;
+ dev_hold(&adapter->polling_netdev[i]);
+ set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
+ }
+#endif
+
+#ifdef CONFIG_E1000_MQ
+ e1000_setup_queue_mapping(adapter);
+#endif
+
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->stats_lock);
- spin_lock_init(&adapter->tx_lock);
return 0;
}
/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+
+static int __devinit
+e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+ int size;
+
+ size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+ adapter->tx_ring = kmalloc(size, GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+ memset(adapter->tx_ring, 0, size);
+
+ size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+ adapter->rx_ring = kmalloc(size, GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+ memset(adapter->rx_ring, 0, size);
+
+#ifdef CONFIG_E1000_NAPI
+ size = sizeof(struct net_device) * adapter->num_queues;
+ adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
+ if (!adapter->polling_netdev) {
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ return -ENOMEM;
+ }
+ memset(adapter->polling_netdev, 0, size);
+#endif
+
+ return E1000_SUCCESS;
+}
+
+#ifdef CONFIG_E1000_MQ
+static void __devinit
+e1000_setup_queue_mapping(struct e1000_adapter *adapter)
+{
+ int i, cpu;
+
+ adapter->rx_sched_call_data.func = e1000_rx_schedule;
+ adapter->rx_sched_call_data.info = adapter->netdev;
+ cpus_clear(adapter->rx_sched_call_data.cpumask);
+
+ adapter->cpu_netdev = alloc_percpu(struct net_device *);
+ adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+
+ lock_cpu_hotplug();
+ i = 0;
+ for_each_online_cpu(cpu) {
+ *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
+ /* This is incomplete because we'd like to assign separate
+ * physical cpus to these netdev polling structures and
+ * avoid saturating a subset of cpus.
+ */
+ if (i < adapter->num_queues) {
+ *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
+ adapter->cpu_for_queue[i] = cpu;
+ } else
+ *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
+
+ i++;
+ }
+ unlock_cpu_hotplug();
+}
+#endif
+
+/**
* e1000_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -868,12 +1053,12 @@ e1000_open(struct net_device *netdev)
/* allocate transmit descriptors */
- if((err = e1000_setup_tx_resources(adapter)))
+ if ((err = e1000_setup_all_tx_resources(adapter)))
goto err_setup_tx;
/* allocate receive descriptors */
- if((err = e1000_setup_rx_resources(adapter)))
+ if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
if((err = e1000_up(adapter)))
@@ -887,9 +1072,9 @@ e1000_open(struct net_device *netdev)
return E1000_SUCCESS;
err_up:
- e1000_free_rx_resources(adapter);
+ e1000_free_all_rx_resources(adapter);
err_setup_rx:
- e1000_free_tx_resources(adapter);
+ e1000_free_all_tx_resources(adapter);
err_setup_tx:
e1000_reset(adapter);
@@ -915,8 +1100,8 @@ e1000_close(struct net_device *netdev)
e1000_down(adapter);
- e1000_free_tx_resources(adapter);
- e1000_free_rx_resources(adapter);
+ e1000_free_all_tx_resources(adapter);
+ e1000_free_all_rx_resources(adapter);
if((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
@@ -951,14 +1136,15 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
/**
* e1000_setup_tx_resources - allocate Tx resources (Descriptors)
* @adapter: board private structure
+ * @txdr: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
int
-e1000_setup_tx_resources(struct e1000_adapter *adapter)
+e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr)
{
- struct e1000_desc_ring *txdr = &adapter->tx_ring;
struct pci_dev *pdev = adapter->pdev;
int size;
@@ -970,6 +1156,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
+ memset(&txdr->previous_buffer_info, 0, sizeof(struct e1000_buffer));
/* round up to nearest 4K */
@@ -1018,11 +1205,41 @@ setup_tx_desc_die:
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
+ spin_lock_init(&txdr->tx_lock);
return 0;
}
/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
* e1000_configure_tx - Configure 8254x Transmit Unit after Reset
* @adapter: board private structure
*
@@ -1032,23 +1249,43 @@ setup_tx_desc_die:
static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
- uint64_t tdba = adapter->tx_ring.dma;
- uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
- uint32_t tctl, tipg;
-
- E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
-
- E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
+ uint64_t tdba;
+ struct e1000_hw *hw = &adapter->hw;
+ uint32_t tdlen, tctl, tipg, tarc;
/* Setup the HW Tx Head and Tail descriptor pointers */
- E1000_WRITE_REG(&adapter->hw, TDH, 0);
- E1000_WRITE_REG(&adapter->hw, TDT, 0);
+ switch (adapter->num_queues) {
+ case 2:
+ tdba = adapter->tx_ring[1].dma;
+ tdlen = adapter->tx_ring[1].count *
+ sizeof(struct e1000_tx_desc);
+ E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
+ E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
+ E1000_WRITE_REG(hw, TDLEN1, tdlen);
+ E1000_WRITE_REG(hw, TDH1, 0);
+ E1000_WRITE_REG(hw, TDT1, 0);
+ adapter->tx_ring[1].tdh = E1000_TDH1;
+ adapter->tx_ring[1].tdt = E1000_TDT1;
+ /* Fall Through */
+ case 1:
+ default:
+ tdba = adapter->tx_ring[0].dma;
+ tdlen = adapter->tx_ring[0].count *
+ sizeof(struct e1000_tx_desc);
+ E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+ E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+ E1000_WRITE_REG(hw, TDLEN, tdlen);
+ E1000_WRITE_REG(hw, TDH, 0);
+ E1000_WRITE_REG(hw, TDT, 0);
+ adapter->tx_ring[0].tdh = E1000_TDH;
+ adapter->tx_ring[0].tdt = E1000_TDT;
+ break;
+ }
/* Set the default values for the Tx Inter Packet Gap timer */
- switch (adapter->hw.mac_type) {
+ switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
tipg = DEFAULT_82542_TIPG_IPGT;
@@ -1056,67 +1293,81 @@ e1000_configure_tx(struct e1000_adapter *adapter)
tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
break;
default:
- if(adapter->hw.media_type == e1000_media_type_fiber ||
- adapter->hw.media_type == e1000_media_type_internal_serdes)
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes)
tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
else
tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
}
- E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
+ E1000_WRITE_REG(hw, TIPG, tipg);
/* Set the Tx Interrupt Delay register */
- E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
- if(adapter->hw.mac_type >= e1000_82540)
- E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
+ E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+ if (hw->mac_type >= e1000_82540)
+ E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
/* Program the Transmit Control Register */
- tctl = E1000_READ_REG(&adapter->hw, TCTL);
+ tctl = E1000_READ_REG(hw, TCTL);
tctl &= ~E1000_TCTL_CT;
- tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
+ tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
- E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+ E1000_WRITE_REG(hw, TCTL, tctl);
- e1000_config_collision_dist(&adapter->hw);
+ if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+ tarc = E1000_READ_REG(hw, TARC0);
+ tarc |= ((1 << 25) | (1 << 21));
+ E1000_WRITE_REG(hw, TARC0, tarc);
+ tarc = E1000_READ_REG(hw, TARC1);
+ tarc |= (1 << 25);
+ if (tctl & E1000_TCTL_MULR)
+ tarc &= ~(1 << 28);
+ else
+ tarc |= (1 << 28);
+ E1000_WRITE_REG(hw, TARC1, tarc);
+ }
+
+ e1000_config_collision_dist(hw);
/* Setup Transmit Descriptor Settings for eop descriptor */
adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
E1000_TXD_CMD_IFCS;
- if(adapter->hw.mac_type < e1000_82543)
+ if (hw->mac_type < e1000_82543)
adapter->txd_cmd |= E1000_TXD_CMD_RPS;
else
adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
* need this to apply a workaround later in the send path. */
- if(adapter->hw.mac_type == e1000_82544 &&
- adapter->hw.bus_type == e1000_bus_type_pcix)
+ if (hw->mac_type == e1000_82544 &&
+ hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = 1;
}
/**
* e1000_setup_rx_resources - allocate Rx resources (Descriptors)
* @adapter: board private structure
+ * @rxdr: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
int
-e1000_setup_rx_resources(struct e1000_adapter *adapter)
+e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr)
{
- struct e1000_desc_ring *rxdr = &adapter->rx_ring;
struct pci_dev *pdev = adapter->pdev;
int size, desc_len;
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
- if(!rxdr->buffer_info) {
+ if (!rxdr->buffer_info) {
DPRINTK(PROBE, ERR,
"Unable to allocate memory for the receive descriptor ring\n");
return -ENOMEM;
@@ -1156,13 +1407,13 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
- if(!rxdr->desc) {
+ if (!rxdr->desc) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the receive descriptor ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
kfree(rxdr->ps_page);
kfree(rxdr->ps_page_dma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
return -ENOMEM;
}
@@ -1174,9 +1425,12 @@ setup_rx_desc_die:
"at %p\n", rxdr->size, rxdr->desc);
/* Try again, without freeing the previous */
rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
- if(!rxdr->desc) {
/* Failed allocation, critical failure */
+ if (!rxdr->desc) {
pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory "
+ "for the receive descriptor ring\n");
goto setup_rx_desc_die;
}
@@ -1188,10 +1442,7 @@ setup_rx_desc_die:
DPRINTK(PROBE, ERR,
"Unable to allocate aligned memory "
"for the receive descriptor ring\n");
- vfree(rxdr->buffer_info);
- kfree(rxdr->ps_page);
- kfree(rxdr->ps_page_dma);
- return -ENOMEM;
+ goto setup_rx_desc_die;
} else {
/* Free old allocation, new allocation was successful */
pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
@@ -1206,15 +1457,48 @@ setup_rx_desc_die:
}
/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
* e1000_setup_rctl - configure the receive control registers
* @adapter: Board private structure
**/
-
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
uint32_t rctl, rfctl;
uint32_t psrctl = 0;
+#ifdef CONFIG_E1000_PACKET_SPLIT
+ uint32_t pages = 0;
+#endif
rctl = E1000_READ_REG(&adapter->hw, RCTL);
@@ -1235,7 +1519,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rctl |= E1000_RCTL_LPE;
/* Setup buffer sizes */
- if(adapter->hw.mac_type == e1000_82573) {
+ if(adapter->hw.mac_type >= e1000_82571) {
/* We can now specify buffers in 1K increments.
* BSIZE and BSEX are ignored in this case. */
rctl |= adapter->rx_buffer_len << 0x11;
@@ -1268,11 +1552,14 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
* followed by the page buffers. Therefore, skb->data is
* sized to hold the largest protocol header.
*/
- adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2)
- && (adapter->netdev->mtu
- < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
+ pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+ if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
+ PAGE_SIZE <= 16384)
+ adapter->rx_ps_pages = pages;
+ else
+ adapter->rx_ps_pages = 0;
#endif
- if(adapter->rx_ps) {
+ if (adapter->rx_ps_pages) {
/* Configure extra packet-split registers */
rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
@@ -1284,12 +1571,19 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
psrctl |= adapter->rx_ps_bsize0 >>
E1000_PSRCTL_BSIZE0_SHIFT;
- psrctl |= PAGE_SIZE >>
- E1000_PSRCTL_BSIZE1_SHIFT;
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE2_SHIFT;
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE3_SHIFT;
+
+ switch (adapter->rx_ps_pages) {
+ case 3:
+ psrctl |= PAGE_SIZE <<
+ E1000_PSRCTL_BSIZE3_SHIFT;
+ case 2:
+ psrctl |= PAGE_SIZE <<
+ E1000_PSRCTL_BSIZE2_SHIFT;
+ case 1:
+ psrctl |= PAGE_SIZE >>
+ E1000_PSRCTL_BSIZE1_SHIFT;
+ break;
+ }
E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
}
@@ -1307,91 +1601,181 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
- uint64_t rdba = adapter->rx_ring.dma;
- uint32_t rdlen, rctl, rxcsum;
+ uint64_t rdba;
+ struct e1000_hw *hw = &adapter->hw;
+ uint32_t rdlen, rctl, rxcsum, ctrl_ext;
+#ifdef CONFIG_E1000_MQ
+ uint32_t reta, mrqc;
+ int i;
+#endif
- if(adapter->rx_ps) {
- rdlen = adapter->rx_ring.count *
+ if (adapter->rx_ps_pages) {
+ rdlen = adapter->rx_ring[0].count *
sizeof(union e1000_rx_desc_packet_split);
adapter->clean_rx = e1000_clean_rx_irq_ps;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
} else {
- rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
+ rdlen = adapter->rx_ring[0].count *
+ sizeof(struct e1000_rx_desc);
adapter->clean_rx = e1000_clean_rx_irq;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
}
/* disable receives while setting up the descriptors */
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
+ rctl = E1000_READ_REG(hw, RCTL);
+ E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
/* set the Receive Delay Timer Register */
- E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
+ E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
- if(adapter->hw.mac_type >= e1000_82540) {
- E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
+ if (hw->mac_type >= e1000_82540) {
+ E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
if(adapter->itr > 1)
- E1000_WRITE_REG(&adapter->hw, ITR,
+ E1000_WRITE_REG(hw, ITR,
1000000000 / (adapter->itr * 256));
}
- /* Setup the Base and Length of the Rx Descriptor Ring */
- E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
+ if (hw->mac_type >= e1000_82571) {
+ /* Reset delay timers after every interrupt */
+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_CANC;
+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ switch (adapter->num_queues) {
+#ifdef CONFIG_E1000_MQ
+ case 2:
+ rdba = adapter->rx_ring[1].dma;
+ E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
+ E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
+ E1000_WRITE_REG(hw, RDLEN1, rdlen);
+ E1000_WRITE_REG(hw, RDH1, 0);
+ E1000_WRITE_REG(hw, RDT1, 0);
+ adapter->rx_ring[1].rdh = E1000_RDH1;
+ adapter->rx_ring[1].rdt = E1000_RDT1;
+ /* Fall Through */
+#endif
+ case 1:
+ default:
+ rdba = adapter->rx_ring[0].dma;
+ E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+ E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+ E1000_WRITE_REG(hw, RDLEN, rdlen);
+ E1000_WRITE_REG(hw, RDH, 0);
+ E1000_WRITE_REG(hw, RDT, 0);
+ adapter->rx_ring[0].rdh = E1000_RDH;
+ adapter->rx_ring[0].rdt = E1000_RDT;
+ break;
+ }
+
+#ifdef CONFIG_E1000_MQ
+ if (adapter->num_queues > 1) {
+ uint32_t random[10];
+
+ get_random_bytes(&random[0], 40);
+
+ if (hw->mac_type <= e1000_82572) {
+ E1000_WRITE_REG(hw, RSSIR, 0);
+ E1000_WRITE_REG(hw, RSSIM, 0);
+ }
+
+ switch (adapter->num_queues) {
+ case 2:
+ default:
+ reta = 0x00800080;
+ mrqc = E1000_MRQC_ENABLE_RSS_2Q;
+ break;
+ }
+
+ /* Fill out redirection table */
+ for (i = 0; i < 32; i++)
+ E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
+ /* Fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
+
+ mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP);
+ E1000_WRITE_REG(hw, MRQC, mrqc);
+ }
- E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
+ /* Multiqueue and packet checksumming are mutually exclusive. */
+ if (hw->mac_type >= e1000_82571) {
+ rxcsum = E1000_READ_REG(hw, RXCSUM);
+ rxcsum |= E1000_RXCSUM_PCSD;
+ E1000_WRITE_REG(hw, RXCSUM, rxcsum);
+ }
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- E1000_WRITE_REG(&adapter->hw, RDH, 0);
- E1000_WRITE_REG(&adapter->hw, RDT, 0);
+#else
/* Enable 82543 Receive Checksum Offload for TCP and UDP */
- if(adapter->hw.mac_type >= e1000_82543) {
- rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
+ if (hw->mac_type >= e1000_82543) {
+ rxcsum = E1000_READ_REG(hw, RXCSUM);
if(adapter->rx_csum == TRUE) {
rxcsum |= E1000_RXCSUM_TUOFL;
- /* Enable 82573 IPv4 payload checksum for UDP fragments
+ /* Enable 82571 IPv4 payload checksum for UDP fragments
* Must be used in conjunction with packet-split. */
- if((adapter->hw.mac_type > e1000_82547_rev_2) &&
- (adapter->rx_ps)) {
+ if ((hw->mac_type >= e1000_82571) &&
+ (adapter->rx_ps_pages)) {
rxcsum |= E1000_RXCSUM_IPPCSE;
}
} else {
rxcsum &= ~E1000_RXCSUM_TUOFL;
/* don't need to clear IPPCSE as it defaults to 0 */
}
- E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
+ E1000_WRITE_REG(hw, RXCSUM, rxcsum);
}
+#endif /* CONFIG_E1000_MQ */
- if (adapter->hw.mac_type == e1000_82573)
- E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);
+ if (hw->mac_type == e1000_82573)
+ E1000_WRITE_REG(hw, ERT, 0x0100);
/* Enable Receives */
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ E1000_WRITE_REG(hw, RCTL, rctl);
}
/**
- * e1000_free_tx_resources - Free Tx Resources
+ * e1000_free_tx_resources - Free Tx Resources per Queue
* @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
void
-e1000_free_tx_resources(struct e1000_adapter *adapter)
+e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
{
struct pci_dev *pdev = adapter->pdev;
- e1000_clean_tx_ring(adapter);
+ e1000_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
- vfree(adapter->tx_ring.buffer_info);
- adapter->tx_ring.buffer_info = NULL;
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
- pci_free_consistent(pdev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+ int i;
- adapter->tx_ring.desc = NULL;
+ for (i = 0; i < adapter->num_queues; i++)
+ e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
static inline void
@@ -1414,21 +1798,22 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
/**
* e1000_clean_tx_ring - Free Tx Buffers
* @adapter: board private structure
+ * @tx_ring: ring to be cleaned
**/
static void
-e1000_clean_tx_ring(struct e1000_adapter *adapter)
+e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
{
- struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
struct e1000_buffer *buffer_info;
unsigned long size;
unsigned int i;
/* Free all the Tx ring sk_buffs */
- if (likely(adapter->previous_buffer_info.skb != NULL)) {
+ if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
e1000_unmap_and_free_tx_resource(adapter,
- &adapter->previous_buffer_info);
+ &tx_ring->previous_buffer_info);
}
for(i = 0; i < tx_ring->count; i++) {
@@ -1446,24 +1831,39 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- E1000_WRITE_REG(&adapter->hw, TDH, 0);
- E1000_WRITE_REG(&adapter->hw, TDT, 0);
+ writel(0, adapter->hw.hw_addr + tx_ring->tdh);
+ writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
* e1000_free_rx_resources - Free Rx Resources
* @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
void
-e1000_free_rx_resources(struct e1000_adapter *adapter)
+e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
{
- struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
struct pci_dev *pdev = adapter->pdev;
- e1000_clean_rx_ring(adapter);
+ e1000_clean_rx_ring(adapter, rx_ring);
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
@@ -1478,14 +1878,31 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
}
/**
- * e1000_clean_rx_ring - Free Rx Buffers
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
* @adapter: board private structure
+ * @rx_ring: ring to free buffers from
**/
static void
-e1000_clean_rx_ring(struct e1000_adapter *adapter)
+e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
{
- struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
struct e1000_buffer *buffer_info;
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
@@ -1508,7 +1925,7 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
- for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+ for(j = 0; j < adapter->rx_ps_pages; j++) {
if(!ps_page->ps_page[j]) break;
pci_unmap_single(pdev,
ps_page_dma->ps_page_dma[j],
@@ -1534,8 +1951,22 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
- E1000_WRITE_REG(&adapter->hw, RDH, 0);
- E1000_WRITE_REG(&adapter->hw, RDT, 0);
+ writel(0, adapter->hw.hw_addr + rx_ring->rdh);
+ writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
@@ -1556,7 +1987,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
mdelay(5);
if(netif_running(netdev))
- e1000_clean_rx_ring(adapter);
+ e1000_clean_all_rx_rings(adapter);
}
static void
@@ -1576,7 +2007,7 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
if(netif_running(netdev)) {
e1000_configure_rx(adapter);
- e1000_alloc_rx_buffers(adapter);
+ e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
}
}
@@ -1607,6 +2038,22 @@ e1000_set_mac(struct net_device *netdev, void *p)
e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
+ /* With 82571 controllers, LAA may be overwritten (with the default)
+ * due to controller reset from the other port. */
+ if (adapter->hw.mac_type == e1000_82571) {
+ /* activate the work around */
+ adapter->hw.laa_is_present = 1;
+
+ /* Hold a copy of the LAA in RAR[14]. This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed (in e1000_watchdog), the actual LAA is in one
+ * of the RARs and no incoming packets directed to this port
+ * are dropped. Eventually the LAA will be in RAR[0] and
+ * RAR[14]. */
+ e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
+ E1000_RAR_ENTRIES - 1);
+ }
+
if(adapter->hw.mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter);
@@ -1629,12 +2076,13 @@ e1000_set_multi(struct net_device *netdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
struct dev_mc_list *mc_ptr;
- unsigned long flags;
uint32_t rctl;
uint32_t hash_value;
- int i;
+ int i, rar_entries = E1000_RAR_ENTRIES;
- spin_lock_irqsave(&adapter->tx_lock, flags);
+ /* reserve RAR[14] for the LAA overwrite workaround */
+ if (adapter->hw.mac_type == e1000_82571)
+ rar_entries--;
/* Check for Promiscuous and All Multicast modes */
@@ -1659,11 +2107,12 @@ e1000_set_multi(struct net_device *netdev)
/* load the first 14 multicast addresses into the exact filters 1-14
* RAR 0 is used for the station MAC address
* if there are not 14 addresses, go ahead and clear the filters
+ * -- with 82571 controllers only 0-13 entries are filled here
*/
mc_ptr = netdev->mc_list;
- for(i = 1; i < E1000_RAR_ENTRIES; i++) {
- if(mc_ptr) {
+ for(i = 1; i < rar_entries; i++) {
+ if (mc_ptr) {
e1000_rar_set(hw, mc_ptr->dmi_addr, i);
mc_ptr = mc_ptr->next;
} else {
@@ -1686,8 +2135,6 @@ e1000_set_multi(struct net_device *netdev)
if(hw->mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter);
-
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
/* Need to wait a few seconds after link up to get diagnostic information from
@@ -1759,7 +2206,7 @@ static void
e1000_watchdog_task(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct e1000_desc_ring *txdr = &adapter->tx_ring;
+ struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
uint32_t link;
e1000_check_for_link(&adapter->hw);
@@ -1818,8 +2265,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
e1000_update_adaptive(&adapter->hw);
- if(!netif_carrier_ok(netdev)) {
- if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+ if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
+ if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
@@ -1847,6 +2294,11 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = TRUE;
+ /* With 82571 controllers, LAA may be overwritten due to controller
+ * reset from the other port. Set the appropriate LAA in RAR[0] */
+ if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
+ e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
+
/* Reset the timer */
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
@@ -1859,7 +2311,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
#define E1000_TX_FLAGS_VLAN_SHIFT 16
static inline int
-e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
struct e1000_context_desc *context_desc;
@@ -1910,8 +2363,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
- i = adapter->tx_ring.next_to_use;
- context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+ i = tx_ring->next_to_use;
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
context_desc->lower_setup.ip_fields.ipcss = ipcss;
context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -1923,8 +2376,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
context_desc->cmd_and_length = cpu_to_le32(cmd_length);
- if(++i == adapter->tx_ring.count) i = 0;
- adapter->tx_ring.next_to_use = i;
+ if (++i == tx_ring->count) i = 0;
+ tx_ring->next_to_use = i;
return 1;
}
@@ -1934,7 +2387,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
}
static inline boolean_t
-e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb)
{
struct e1000_context_desc *context_desc;
unsigned int i;
@@ -1943,8 +2397,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
if(likely(skb->ip_summed == CHECKSUM_HW)) {
css = skb->h.raw - skb->data;
- i = adapter->tx_ring.next_to_use;
- context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+ i = tx_ring->next_to_use;
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
context_desc->upper_setup.tcp_fields.tucss = css;
context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
@@ -1952,8 +2406,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
- if(unlikely(++i == adapter->tx_ring.count)) i = 0;
- adapter->tx_ring.next_to_use = i;
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ tx_ring->next_to_use = i;
return TRUE;
}
@@ -1965,11 +2419,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
static inline int
-e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
- unsigned int first, unsigned int max_per_txd,
- unsigned int nr_frags, unsigned int mss)
+e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
+ unsigned int nr_frags, unsigned int mss)
{
- struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
struct e1000_buffer *buffer_info;
unsigned int len = skb->len;
unsigned int offset = 0, size, count = 0, i;
@@ -2065,9 +2518,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
}
static inline void
-e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
+e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+ int tx_flags, int count)
{
- struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
struct e1000_tx_desc *tx_desc = NULL;
struct e1000_buffer *buffer_info;
uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -2113,7 +2566,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
wmb();
tx_ring->next_to_use = i;
- E1000_WRITE_REG(&adapter->hw, TDT, i);
+ writel(i, adapter->hw.hw_addr + tx_ring->tdt);
}
/**
@@ -2206,6 +2659,7 @@ static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_tx_ring *tx_ring;
unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
@@ -2218,7 +2672,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int f;
len -= skb->data_len;
- if(unlikely(skb->len <= 0)) {
+#ifdef CONFIG_E1000_MQ
+ tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
+#else
+ tx_ring = adapter->tx_ring;
+#endif
+
+ if (unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2262,21 +2722,42 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if(adapter->pcix_82544)
count += nr_frags;
- local_irq_save(flags);
- if (!spin_trylock(&adapter->tx_lock)) {
- /* Collision - tell upper layer to requeue */
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+#ifdef NETIF_F_TSO
+ /* TSO Workaround for 82571/2 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data */
+ if (skb_shinfo(skb)->tso_size) {
+ uint8_t hdr_len;
+ hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+ if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
+ (adapter->hw.mac_type == e1000_82571 ||
+ adapter->hw.mac_type == e1000_82572)) {
+ unsigned int pull_size;
+ pull_size = min((unsigned int)4, skb->data_len);
+ if (!__pskb_pull_tail(skb, pull_size)) {
+ printk(KERN_ERR "__pskb_pull_tail failed.\n");
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+ }
+ }
+#endif
+
if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
e1000_transfer_dhcp_info(adapter, skb);
+ local_irq_save(flags);
+ if (!spin_trylock(&tx_ring->tx_lock)) {
+ /* Collision - tell upper layer to requeue */
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
/* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */
- if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
+ if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -2284,7 +2765,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_BUSY;
}
}
@@ -2294,37 +2775,37 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
}
- first = adapter->tx_ring.next_to_use;
+ first = tx_ring->next_to_use;
- tso = e1000_tso(adapter, skb);
+ tso = e1000_tso(adapter, tx_ring, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
if (likely(tso))
tx_flags |= E1000_TX_FLAGS_TSO;
- else if(likely(e1000_tx_csum(adapter, skb)))
+ else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
tx_flags |= E1000_TX_FLAGS_CSUM;
/* Old method was to assume IPv4 packet by default if TSO was enabled.
- * 82573 hardware supports TSO capabilities for IPv6 as well...
+ * 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must. */
- if(likely(skb->protocol == ntohs(ETH_P_IP)))
+ if (likely(skb->protocol == ntohs(ETH_P_IP)))
tx_flags |= E1000_TX_FLAGS_IPV4;
- e1000_tx_queue(adapter,
- e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
- tx_flags);
+ e1000_tx_queue(adapter, tx_ring, tx_flags,
+ e1000_tx_map(adapter, tx_ring, skb, first,
+ max_per_txd, nr_frags, mss));
netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
- if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
+ if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -2388,9 +2869,18 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
-#define MAX_STD_JUMBO_FRAME_SIZE 9216
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
/* might want this to be bigger enum check... */
- if (adapter->hw.mac_type == e1000_82573 &&
+ /* 82571 controllers limit jumbo frame size to 10500 bytes */
+ if ((adapter->hw.mac_type == e1000_82571 ||
+ adapter->hw.mac_type == e1000_82572) &&
+ max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
+ "on 82571 and 82572 controllers.\n");
+ return -EINVAL;
+ }
+
+ if(adapter->hw.mac_type == e1000_82573 &&
max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
"on 82573\n");
@@ -2578,6 +3068,29 @@ e1000_update_stats(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
+#ifdef CONFIG_E1000_MQ
+void
+e1000_rx_schedule(void *data)
+{
+ struct net_device *poll_dev, *netdev = data;
+ struct e1000_adapter *adapter = netdev->priv;
+ int this_cpu = get_cpu();
+
+ poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
+ if (poll_dev == NULL) {
+ put_cpu();
+ return;
+ }
+
+ if (likely(netif_rx_schedule_prep(poll_dev)))
+ __netif_rx_schedule(poll_dev);
+ else
+ e1000_irq_enable(adapter);
+
+ put_cpu();
+}
+#endif
+
/**
* e1000_intr - Interrupt Handler
* @irq: interrupt number
@@ -2592,8 +3105,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
uint32_t icr = E1000_READ_REG(hw, ICR);
-#ifndef CONFIG_E1000_NAPI
- unsigned int i;
+#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
+ int i;
#endif
if(unlikely(!icr))
@@ -2605,17 +3118,31 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
}
#ifdef CONFIG_E1000_NAPI
- if(likely(netif_rx_schedule_prep(netdev))) {
-
- /* Disable interrupts and register for poll. The flush
- of the posted write is intentionally left out.
- */
-
- atomic_inc(&adapter->irq_sem);
- E1000_WRITE_REG(hw, IMC, ~0);
- __netif_rx_schedule(netdev);
+ atomic_inc(&adapter->irq_sem);
+ E1000_WRITE_REG(hw, IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_MQ
+ if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
+ cpu_set(adapter->cpu_for_queue[0],
+ adapter->rx_sched_call_data.cpumask);
+ for (i = 1; i < adapter->num_queues; i++) {
+ cpu_set(adapter->cpu_for_queue[i],
+ adapter->rx_sched_call_data.cpumask);
+ atomic_inc(&adapter->irq_sem);
+ }
+ atomic_set(&adapter->rx_sched_call_data.count, i);
+ smp_call_async_mask(&adapter->rx_sched_call_data);
+ } else {
+ printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
}
-#else
+#else /* if !CONFIG_E1000_MQ */
+ if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
+ __netif_rx_schedule(&adapter->polling_netdev[0]);
+ else
+ e1000_irq_enable(adapter);
+#endif /* CONFIG_E1000_MQ */
+
+#else /* if !CONFIG_E1000_NAPI */
/* Writing IMC and IMS is needed for 82547.
Due to Hub Link bus being occupied, an interrupt
de-assertion message is not able to be sent.
@@ -2632,13 +3159,14 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
}
for(i = 0; i < E1000_MAX_INTR; i++)
- if(unlikely(!adapter->clean_rx(adapter) &
- !e1000_clean_tx_irq(adapter)))
+ if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
break;
if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
e1000_irq_enable(adapter);
-#endif
+
+#endif /* CONFIG_E1000_NAPI */
return IRQ_HANDLED;
}
@@ -2650,22 +3178,37 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
**/
static int
-e1000_clean(struct net_device *netdev, int *budget)
+e1000_clean(struct net_device *poll_dev, int *budget)
{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- int work_to_do = min(*budget, netdev->quota);
- int tx_cleaned;
- int work_done = 0;
+ struct e1000_adapter *adapter;
+ int work_to_do = min(*budget, poll_dev->quota);
+ int tx_cleaned, i = 0, work_done = 0;
- tx_cleaned = e1000_clean_tx_irq(adapter);
- adapter->clean_rx(adapter, &work_done, work_to_do);
+ /* Must NOT use netdev_priv macro here. */
+ adapter = poll_dev->priv;
+
+ /* Keep link state information with original netdev */
+ if (!netif_carrier_ok(adapter->netdev))
+ goto quit_polling;
+
+ while (poll_dev != &adapter->polling_netdev[i]) {
+ i++;
+ if (unlikely(i == adapter->num_queues))
+ BUG();
+ }
+
+ tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+ adapter->clean_rx(adapter, &adapter->rx_ring[i],
+ &work_done, work_to_do);
*budget -= work_done;
- netdev->quota -= work_done;
+ poll_dev->quota -= work_done;
- if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
/* If no Tx and not enough Rx work done, exit the polling mode */
- netif_rx_complete(netdev);
+ if((!tx_cleaned && (work_done == 0)) ||
+ !netif_running(adapter->netdev)) {
+quit_polling:
+ netif_rx_complete(poll_dev);
e1000_irq_enable(adapter);
return 0;
}
@@ -2680,9 +3223,9 @@ e1000_clean(struct net_device *netdev, int *budget)
**/
static boolean_t
-e1000_clean_tx_irq(struct e1000_adapter *adapter)
+e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
{
- struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
struct net_device *netdev = adapter->netdev;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct e1000_buffer *buffer_info;
@@ -2693,12 +3236,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
- while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+ while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
/* Premature writeback of Tx descriptors clear (free buffers
* and unmap pci_mapping) previous_buffer_info */
- if (likely(adapter->previous_buffer_info.skb != NULL)) {
+ if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
e1000_unmap_and_free_tx_resource(adapter,
- &adapter->previous_buffer_info);
+ &tx_ring->previous_buffer_info);
}
for(cleaned = FALSE; !cleaned; ) {
@@ -2714,7 +3257,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
#ifdef NETIF_F_TSO
} else {
if (cleaned) {
- memcpy(&adapter->previous_buffer_info,
+ memcpy(&tx_ring->previous_buffer_info,
buffer_info,
sizeof(struct e1000_buffer));
memset(buffer_info, 0,
@@ -2732,6 +3275,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
if(unlikely(++i == tx_ring->count)) i = 0;
}
+
+ tx_ring->pkt++;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
@@ -2739,15 +3284,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_ring->next_to_clean = i;
- spin_lock(&adapter->tx_lock);
+ spin_lock(&tx_ring->tx_lock);
if(unlikely(cleaned && netif_queue_stopped(netdev) &&
netif_carrier_ok(netdev)))
netif_wake_queue(netdev);
- spin_unlock(&adapter->tx_lock);
- if(adapter->detect_tx_hung) {
+ spin_unlock(&tx_ring->tx_lock);
+ if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = FALSE;
@@ -2771,8 +3316,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
" next_to_watch.status <%x>\n",
- E1000_READ_REG(&adapter->hw, TDH),
- E1000_READ_REG(&adapter->hw, TDT),
+ readl(adapter->hw.hw_addr + tx_ring->tdh),
+ readl(adapter->hw.hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
i,
(unsigned long long)tx_ring->buffer_info[i].dma,
@@ -2784,12 +3329,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
}
#ifdef NETIF_F_TSO
-
- if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
+ if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ)))
e1000_unmap_and_free_tx_resource(
- adapter, &adapter->previous_buffer_info);
-
+ adapter, &tx_ring->previous_buffer_info);
#endif
return cleaned;
}
@@ -2852,13 +3395,14 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
static boolean_t
#ifdef CONFIG_E1000_NAPI
-e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
- int work_to_do)
+e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
#else
-e1000_clean_rx_irq(struct e1000_adapter *adapter)
+e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
#endif
{
- struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
@@ -2944,6 +3488,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
}
#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
+ rx_ring->pkt++;
next_desc:
rx_desc->status = 0;
@@ -2953,7 +3498,7 @@ next_desc:
rx_desc = E1000_RX_DESC(*rx_ring, i);
}
rx_ring->next_to_clean = i;
- adapter->alloc_rx_buf(adapter);
+ adapter->alloc_rx_buf(adapter, rx_ring);
return cleaned;
}
@@ -2965,13 +3510,14 @@ next_desc:
static boolean_t
#ifdef CONFIG_E1000_NAPI
-e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done,
- int work_to_do)
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
#else
-e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
+e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
#endif
{
- struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
union e1000_rx_desc_packet_split *rx_desc;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -3027,7 +3573,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
/* Good Receive */
skb_put(skb, length);
- for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+ for(j = 0; j < adapter->rx_ps_pages; j++) {
if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
break;
@@ -3048,11 +3594,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
skb->protocol = eth_type_trans(skb, netdev);
-#ifdef HAVE_RX_ZERO_COPY
if(likely(rx_desc->wb.upper.header_status &
- E1000_RXDPS_HDRSTAT_HDRSP))
+ E1000_RXDPS_HDRSTAT_HDRSP)) {
+ adapter->rx_hdr_split++;
+#ifdef HAVE_RX_ZERO_COPY
skb_shinfo(skb)->zero_copy = TRUE;
#endif
+ }
#ifdef CONFIG_E1000_NAPI
if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
@@ -3071,6 +3619,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
}
#endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies;
+ rx_ring->pkt++;
next_desc:
rx_desc->wb.middle.status_error &= ~0xFF;
@@ -3081,7 +3630,7 @@ next_desc:
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
}
rx_ring->next_to_clean = i;
- adapter->alloc_rx_buf(adapter);
+ adapter->alloc_rx_buf(adapter, rx_ring);
return cleaned;
}
@@ -3092,9 +3641,9 @@ next_desc:
**/
static void
-e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
+e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
{
- struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
@@ -3178,7 +3727,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
* applicable for weak-ordered memory model archs,
* such as IA-64). */
wmb();
- E1000_WRITE_REG(&adapter->hw, RDT, i);
+ writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
if(unlikely(++i == rx_ring->count)) i = 0;
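
The wmb()-before-tail-write ordering noted in the comment above is a general rule: descriptor contents must be visible in memory before the device is told, via the tail register, that new descriptors exist. A hedged sketch of that rule, with illustrative names (my_rx_ring, rdt) rather than the driver's real ones:

#include <asm/io.h>
#include <asm/system.h>		/* wmb() on 2.6-era kernels */

struct my_rx_ring {
	unsigned int rdt;	/* byte offset of this ring's tail register */
};

static void my_post_rx_buffers(void __iomem *hw_addr, struct my_rx_ring *ring,
			       unsigned int next_to_use)
{
	/* 1. Descriptor writes happen here (omitted in this sketch). */

	/* 2. Force those writes out before the device can observe the new
	 *    tail value; required on weakly ordered CPUs such as IA-64. */
	wmb();

	/* 3. Only now advance the tail so the NIC may fetch the buffers. */
	writel(next_to_use, hw_addr + ring->rdt);
}
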
@@ -3194,9 +3743,9 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
**/
static void
-e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
+e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
{
- struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union e1000_rx_desc_packet_split *rx_desc;
@@ -3215,22 +3764,26 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
for(j = 0; j < PS_PAGE_BUFFERS; j++) {
- if(unlikely(!ps_page->ps_page[j])) {
- ps_page->ps_page[j] =
- alloc_page(GFP_ATOMIC);
- if(unlikely(!ps_page->ps_page[j]))
- goto no_buffers;
- ps_page_dma->ps_page_dma[j] =
- pci_map_page(pdev,
- ps_page->ps_page[j],
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- }
- /* Refresh the desc even if buffer_addrs didn't
- * change because each write-back erases this info.
- */
- rx_desc->read.buffer_addr[j+1] =
- cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+ if (j < adapter->rx_ps_pages) {
+ if (likely(!ps_page->ps_page[j])) {
+ ps_page->ps_page[j] =
+ alloc_page(GFP_ATOMIC);
+ if (unlikely(!ps_page->ps_page[j]))
+ goto no_buffers;
+ ps_page_dma->ps_page_dma[j] =
+ pci_map_page(pdev,
+ ps_page->ps_page[j],
+ 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't
+ * change because each write-back erases
+ * this info.
+ */
+ rx_desc->read.buffer_addr[j+1] =
+ cpu_to_le64(ps_page_dma->ps_page_dma[j]);
+ } else
+ rx_desc->read.buffer_addr[j+1] = ~0;
}
skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
@@ -3264,7 +3817,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
* descriptors are 32 bytes...so we increment tail
* twice as much.
*/
- E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
+ writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
}
if(unlikely(++i == rx_ring->count)) i = 0;
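
As the comment above notes, a packet-split descriptor is 32 bytes, twice the size of a legacy descriptor, so the tail register has to advance by two units per descriptor posted. A tiny illustration of that arithmetic, with made-up names:

#include <asm/io.h>

static void my_post_ps_tail(void __iomem *hw_addr, unsigned int rdt_offset,
			    unsigned int i)
{
	writel(i << 1, hw_addr + rdt_offset);	/* i descriptors => i * 2 tail units */
}
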
@@ -3715,6 +4268,12 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
}
switch(adapter->hw.mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ break;
case e1000_82573:
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -3737,6 +4296,7 @@ e1000_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t manc, ret_val, swsm;
+ uint32_t ctrl_ext;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -3762,6 +4322,12 @@ e1000_resume(struct pci_dev *pdev)
}
switch(adapter->hw.mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ break;
case e1000_82573:
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -3786,7 +4352,7 @@ e1000_netpoll(struct net_device *netdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev, NULL);
- e1000_clean_tx_irq(adapter);
+ e1000_clean_tx_irq(adapter, adapter->tx_ring);
enable_irq(adapter->pdev->irq);
}
#endif
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 676247f9f1c..38695d5b463 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -306,7 +306,8 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_TXD,
.arg = { .r = { .min = E1000_MIN_TXD }}
};
- struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+ int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt.arg.r.max = mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD;
@@ -319,6 +320,8 @@ e1000_check_options(struct e1000_adapter *adapter)
} else {
tx_ring->count = opt.def;
}
+ for (i = 0; i < adapter->num_queues; i++)
+ tx_ring[i].count = tx_ring->count;
}
{ /* Receive Descriptor Count */
struct e1000_option opt = {
@@ -329,7 +332,8 @@ e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_RXD,
.arg = { .r = { .min = E1000_MIN_RXD }}
};
- struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
+ struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+ int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
E1000_MAX_82544_RXD;
@@ -342,6 +346,8 @@ e1000_check_options(struct e1000_adapter *adapter)
} else {
rx_ring->count = opt.def;
}
+ for (i = 0; i < adapter->num_queues; i++)
+ rx_ring[i].count = rx_ring->count;
}
{ /* Checksum Offload Enable/Disable */
struct e1000_option opt = {
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 87f522738bf..f119ec4e89e 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1334,7 +1334,7 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
static int epic_poll(struct net_device *dev, int *budget)
{
struct epic_private *ep = dev->priv;
- int work_done, orig_budget;
+ int work_done = 0, orig_budget;
long ioaddr = dev->base_addr;
orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
@@ -1343,7 +1343,7 @@ rx_action:
epic_tx(dev, ep);
- work_done = epic_rx(dev, *budget);
+ work_done += epic_rx(dev, *budget);
epic_rx_err(dev, ep);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d6eefdb71c1..22aec6ed80f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -95,6 +95,8 @@
* of nv_remove
* 0.42: 06 Aug 2005: Fix lack of link speed initialization
* in the second (and later) nv_open call
+ * 0.43: 10 Aug 2005: Add support for tx checksum.
+ * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -106,7 +108,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.41"
+#define FORCEDETH_VERSION "0.44"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -145,6 +147,7 @@
#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
+#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
enum {
NvRegIrqStatus = 0x000,
@@ -241,6 +244,9 @@ enum {
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
+#define NVREG_TXRXCTL_DESC_1 0
+#define NVREG_TXRXCTL_DESC_2 0x02100
+#define NVREG_TXRXCTL_DESC_3 0x02200
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -335,6 +341,10 @@ typedef union _ring_type {
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
+#define NV_TX2_TSO (1<<28)
+#define NV_TX2_TSO_SHIFT 14
+#define NV_TX2_CHECKSUM_L3 (1<<27)
+#define NV_TX2_CHECKSUM_L4 (1<<26)
#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
@@ -417,14 +427,14 @@ typedef union _ring_type {
/*
* desc_ver values:
- * This field has two purposes:
- * - Newer nics uses a different ring layout. The layout is selected by
- * comparing np->desc_ver with DESC_VER_xy.
- * - It contains bits that are forced on when writing to NvRegTxRxControl.
+ * The nic supports three different descriptor types:
+ * - DESC_VER_1: Original
+ * - DESC_VER_2: support for jumbo frames.
+ * - DESC_VER_3: 64-bit format.
*/
-#define DESC_VER_1 0x0
-#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK)
-#define DESC_VER_3 (0x02200|NVREG_TXRXCTL_RXCHECK)
+#define DESC_VER_1 1
+#define DESC_VER_2 2
+#define DESC_VER_3 3
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
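
The rewritten comment above reflects that desc_ver is now a pure descriptor-format selector (1/2/3), while the bits actually forced on in NvRegTxRxControl live in the separate txrxctl_bits field introduced by this patch. A sketch of that split; nv_pick_format() and struct my_priv are illustrative, and the numeric values simply mirror NVREG_TXRXCTL_DESC_{1,2,3} defined earlier in the patch:

#include <linux/types.h>

struct my_priv {
	u32 desc_ver;		/* which descriptor layout the rings use */
	u32 txrxctl_bits;	/* bits forced on in NvRegTxRxControl writes */
};

static void nv_pick_format(struct my_priv *np, int has_high_dma, int has_largedesc)
{
	if (has_high_dma) {
		np->desc_ver = 3;		/* DESC_VER_3: 64-bit descriptors */
		np->txrxctl_bits = 0x02200;	/* NVREG_TXRXCTL_DESC_3 */
	} else if (has_largedesc) {
		np->desc_ver = 2;		/* DESC_VER_2: jumbo frames */
		np->txrxctl_bits = 0x02100;	/* NVREG_TXRXCTL_DESC_2 */
	} else {
		np->desc_ver = 1;		/* DESC_VER_1: original layout */
		np->txrxctl_bits = 0;		/* NVREG_TXRXCTL_DESC_1 */
	}
	/* Checksum-capable NICs additionally OR in NVREG_TXRXCTL_RXCHECK. */
}
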
@@ -491,6 +501,7 @@ struct fe_priv {
u32 orig_mac[2];
u32 irqmask;
u32 desc_ver;
+ u32 txrxctl_bits;
void __iomem *base;
@@ -534,7 +545,7 @@ static inline struct fe_priv *get_nvpriv(struct net_device *dev)
static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
- return get_nvpriv(dev)->base;
+ return ((struct fe_priv *)netdev_priv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
@@ -623,7 +634,7 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
static int phy_reset(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u32 miicontrol;
unsigned int tries = 0;
@@ -726,7 +737,7 @@ static int phy_init(struct net_device *dev)
static void nv_start_rx(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
@@ -782,14 +793,14 @@ static void nv_stop_tx(struct net_device *dev)
static void nv_txrx_reset(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
- writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl);
+ writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
udelay(NV_TXRX_RESET_DELAY);
- writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl);
+ writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
}
@@ -801,7 +812,7 @@ static void nv_txrx_reset(struct net_device *dev)
*/
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
/* It seems that the nic always generates interrupts and doesn't
* accumulate errors internally. Thus the current values in np->stats
@@ -817,7 +828,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
*/
static int nv_alloc_rx(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
unsigned int refill_rx = np->refill_rx;
int nr;
@@ -861,7 +872,7 @@ static int nv_alloc_rx(struct net_device *dev)
static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
disable_irq(dev->irq);
if (nv_alloc_rx(dev)) {
@@ -875,7 +886,7 @@ static void nv_do_rx_refill(unsigned long data)
static void nv_init_rx(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
int i;
np->cur_rx = RX_RING;
@@ -889,15 +900,17 @@ static void nv_init_rx(struct net_device *dev)
static void nv_init_tx(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
int i;
np->next_tx = np->nic_tx = 0;
- for (i = 0; i < TX_RING; i++)
+ for (i = 0; i < TX_RING; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->tx_ring.orig[i].FlagLen = 0;
else
np->tx_ring.ex[i].FlagLen = 0;
+ np->tx_skbuff[i] = NULL;
+ }
}
static int nv_init_ring(struct net_device *dev)
@@ -907,21 +920,44 @@ static int nv_init_ring(struct net_device *dev)
return nv_alloc_rx(dev);
}
+static void nv_release_txskb(struct net_device *dev, unsigned int skbnr)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ struct sk_buff *skb = np->tx_skbuff[skbnr];
+ unsigned int j, entry, fragments;
+
+ dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d, skb %p\n",
+ dev->name, skbnr, np->tx_skbuff[skbnr]);
+
+ entry = skbnr;
+ if ((fragments = skb_shinfo(skb)->nr_frags) != 0) {
+ for (j = fragments; j >= 1; j--) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1];
+ pci_unmap_page(np->pci_dev, np->tx_dma[entry],
+ frag->size,
+ PCI_DMA_TODEVICE);
+ entry = (entry - 1) % TX_RING;
+ }
+ }
+ pci_unmap_single(np->pci_dev, np->tx_dma[entry],
+ skb->len - skb->data_len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ np->tx_skbuff[skbnr] = NULL;
+}
+
static void nv_drain_tx(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
- int i;
+ struct fe_priv *np = netdev_priv(dev);
+ unsigned int i;
+
for (i = 0; i < TX_RING; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->tx_ring.orig[i].FlagLen = 0;
else
np->tx_ring.ex[i].FlagLen = 0;
if (np->tx_skbuff[i]) {
- pci_unmap_single(np->pci_dev, np->tx_dma[i],
- np->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
- dev_kfree_skb(np->tx_skbuff[i]);
- np->tx_skbuff[i] = NULL;
+ nv_release_txskb(dev, i);
np->stats.tx_dropped++;
}
}
@@ -929,7 +965,7 @@ static void nv_drain_tx(struct net_device *dev)
static void nv_drain_rx(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
int i;
for (i = 0; i < RX_RING; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
@@ -959,28 +995,69 @@ static void drain_ring(struct net_device *dev)
*/
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
- int nr = np->next_tx % TX_RING;
+ struct fe_priv *np = netdev_priv(dev);
+ u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+ unsigned int fragments = skb_shinfo(skb)->nr_frags;
+ unsigned int nr = (np->next_tx + fragments) % TX_RING;
+ unsigned int i;
+
+ spin_lock_irq(&np->lock);
+
+ if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) {
+ spin_unlock_irq(&np->lock);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
np->tx_skbuff[nr] = skb;
- np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
- PCI_DMA_TODEVICE);
+
+ if (fragments) {
+ dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments);
+ /* setup descriptors in reverse order */
+ for (i = fragments; i >= 1; i--) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
+ np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size,
+ PCI_DMA_TODEVICE);
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+ np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
+ } else {
+ np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+ np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+ np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
+ }
+
+ nr = (nr - 1) % TX_RING;
+
+ if (np->desc_ver == DESC_VER_1)
+ tx_flags_extra &= ~NV_TX_LASTPACKET;
+ else
+ tx_flags_extra &= ~NV_TX2_LASTPACKET;
+ }
+ }
+
+#ifdef NETIF_F_TSO
+ if (skb_shinfo(skb)->tso_size)
+ tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
+ else
+#endif
+ tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+
+ np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len,
+ PCI_DMA_TODEVICE);
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- else {
+ np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
+ } else {
np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- }
+ np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
+ }
- spin_lock_irq(&np->lock);
- wmb();
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
- else
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
- dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
- dev->name, np->next_tx);
+ dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d queued for transmission. tx_flags_extra: %x\n",
+ dev->name, np->next_tx, tx_flags_extra);
{
int j;
for (j=0; j<64; j++) {
@@ -991,15 +1068,13 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dprintk("\n");
}
- np->next_tx++;
+ np->next_tx += 1 + fragments;
dev->trans_start = jiffies;
- if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
- netif_stop_queue(dev);
spin_unlock_irq(&np->lock);
- writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(get_hwbase(dev));
- return 0;
+ return NETDEV_TX_OK;
}
/*
@@ -1009,9 +1084,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
static void nv_tx_done(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u32 Flags;
- int i;
+ unsigned int i;
+ struct sk_buff *skb;
while (np->nic_tx != np->next_tx) {
i = np->nic_tx % TX_RING;
@@ -1026,35 +1102,38 @@ static void nv_tx_done(struct net_device *dev)
if (Flags & NV_TX_VALID)
break;
if (np->desc_ver == DESC_VER_1) {
- if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
- NV_TX_UNDERFLOW|NV_TX_ERROR)) {
- if (Flags & NV_TX_UNDERFLOW)
- np->stats.tx_fifo_errors++;
- if (Flags & NV_TX_CARRIERLOST)
- np->stats.tx_carrier_errors++;
- np->stats.tx_errors++;
- } else {
- np->stats.tx_packets++;
- np->stats.tx_bytes += np->tx_skbuff[i]->len;
+ if (Flags & NV_TX_LASTPACKET) {
+ skb = np->tx_skbuff[i];
+ if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+ NV_TX_UNDERFLOW|NV_TX_ERROR)) {
+ if (Flags & NV_TX_UNDERFLOW)
+ np->stats.tx_fifo_errors++;
+ if (Flags & NV_TX_CARRIERLOST)
+ np->stats.tx_carrier_errors++;
+ np->stats.tx_errors++;
+ } else {
+ np->stats.tx_packets++;
+ np->stats.tx_bytes += skb->len;
+ }
+ nv_release_txskb(dev, i);
}
} else {
- if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
- NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
- if (Flags & NV_TX2_UNDERFLOW)
- np->stats.tx_fifo_errors++;
- if (Flags & NV_TX2_CARRIERLOST)
- np->stats.tx_carrier_errors++;
- np->stats.tx_errors++;
- } else {
- np->stats.tx_packets++;
- np->stats.tx_bytes += np->tx_skbuff[i]->len;
+ if (Flags & NV_TX2_LASTPACKET) {
+ skb = np->tx_skbuff[i];
+ if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+ NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+ if (Flags & NV_TX2_UNDERFLOW)
+ np->stats.tx_fifo_errors++;
+ if (Flags & NV_TX2_CARRIERLOST)
+ np->stats.tx_carrier_errors++;
+ np->stats.tx_errors++;
+ } else {
+ np->stats.tx_packets++;
+ np->stats.tx_bytes += skb->len;
+ }
+ nv_release_txskb(dev, i);
}
}
- pci_unmap_single(np->pci_dev, np->tx_dma[i],
- np->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(np->tx_skbuff[i]);
- np->tx_skbuff[i] = NULL;
np->nic_tx++;
}
if (np->next_tx - np->nic_tx < TX_LIMIT_START)
@@ -1067,7 +1146,7 @@ static void nv_tx_done(struct net_device *dev)
*/
static void nv_tx_timeout(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
@@ -1200,7 +1279,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
static void nv_rx_process(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u32 Flags;
for (;;) {
@@ -1355,7 +1434,7 @@ static void set_bufsize(struct net_device *dev)
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
int old_mtu;
if (new_mtu < 64 || new_mtu > np->pkt_limit)
@@ -1408,7 +1487,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
- writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
/* restart rx engine */
@@ -1440,7 +1519,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
*/
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
struct sockaddr *macaddr = (struct sockaddr*)addr;
if(!is_valid_ether_addr(macaddr->sa_data))
@@ -1475,7 +1554,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
*/
static void nv_set_multicast(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 addr[2];
u32 mask[2];
@@ -1535,7 +1614,7 @@ static void nv_set_multicast(struct net_device *dev)
static int nv_update_linkspeed(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int adv, lpa;
int newls = np->linkspeed;
@@ -1705,7 +1784,7 @@ static void nv_link_irq(struct net_device *dev)
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
@@ -1777,7 +1856,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
static void nv_do_nic_poll(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
disable_irq(dev->irq);
@@ -1801,7 +1880,7 @@ static void nv_poll_controller(struct net_device *dev)
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
strcpy(info->driver, "forcedeth");
strcpy(info->version, FORCEDETH_VERSION);
strcpy(info->bus_info, pci_name(np->pci_dev));
@@ -1809,7 +1888,7 @@ static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
wolinfo->supported = WAKE_MAGIC;
spin_lock_irq(&np->lock);
@@ -1820,7 +1899,7 @@ static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
spin_lock_irq(&np->lock);
@@ -2021,7 +2100,7 @@ static int nv_get_regs_len(struct net_device *dev)
static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 *rbuf = buf;
int i;
@@ -2035,7 +2114,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
static int nv_nway_reset(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
int ret;
spin_lock_irq(&np->lock);
@@ -2065,11 +2144,12 @@ static struct ethtool_ops ops = {
.get_regs_len = nv_get_regs_len,
.get_regs = nv_get_regs,
.nway_reset = nv_nway_reset,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int nv_open(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret, oom, i;
@@ -2114,9 +2194,9 @@ static int nv_open(struct net_device *dev)
/* 5) continue setup */
writel(np->linkspeed, base + NvRegLinkSpeed);
writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
- writel(np->desc_ver, base + NvRegTxRxControl);
+ writel(np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
- writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl);
+ writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
@@ -2205,7 +2285,7 @@ out_drain:
static int nv_close(struct net_device *dev)
{
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base;
spin_lock_irq(&np->lock);
@@ -2261,7 +2341,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (!dev)
goto out;
- np = get_nvpriv(dev);
+ np = netdev_priv(dev);
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
SET_MODULE_OWNER(dev);
@@ -2313,19 +2393,32 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
pci_name(pci_dev));
+ } else {
+ dev->features |= NETIF_F_HIGHDMA;
}
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2;
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
} else {
/* original packet format */
np->desc_ver = DESC_VER_1;
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
}
np->pkt_limit = NV_PKTLIMIT_1;
if (id->driver_data & DEV_HAS_LARGEDESC)
np->pkt_limit = NV_PKTLIMIT_2;
+ if (id->driver_data & DEV_HAS_CHECKSUM) {
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+#ifdef NETIF_F_TSO
+ dev->features |= NETIF_F_TSO;
+#endif
+ }
+
err = -ENOMEM;
np->base = ioremap(addr, NV_PCI_REGSZ);
if (!np->base)
@@ -2377,8 +2470,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (!is_valid_ether_addr(dev->perm_addr)) {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
@@ -2403,9 +2497,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->wolenabled = 0;
if (np->desc_ver == DESC_VER_1) {
- np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
+ np->tx_flags = NV_TX_VALID;
} else {
- np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
+ np->tx_flags = NV_TX2_VALID;
}
np->irqmask = NVREG_IRQMASK_WANTED;
if (id->driver_data & DEV_NEED_TIMERIRQ)
@@ -2494,7 +2588,7 @@ out:
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
- struct fe_priv *np = get_nvpriv(dev);
+ struct fe_priv *np = netdev_priv(dev);
unregister_netdev(dev);
@@ -2525,35 +2619,35 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* nForce3 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
},
{ /* CK804 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
},
{ /* CK804 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
},
{ /* MCP04 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
},
{ /* MCP04 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
},
{ /* MCP51 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
@@ -2565,11 +2659,11 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
},
{0,},
};
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6518334b928..ae5a2ed3b26 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -29,12 +29,7 @@
* define the configuration needed by the board are defined in a
* board structure in arch/ppc/platforms (though I do not
* discount the possibility that other architectures could one
- * day be supported. One assumption the driver currently makes
- * is that the PHY is configured in such a way to advertise all
- * capabilities. This is a sensible default, and on certain
- * PHYs, changing this default encounters substantial errata
- * issues. Future versions may remove this requirement, but for
- * now, it is best for the firmware to ensure this is the case.
+ * day be supported).
*
* The Gianfar Ethernet Controller uses a ring of buffer
* descriptors. The beginning is indicated by a register
@@ -47,7 +42,7 @@
* corresponding bit in the IMASK register is also set (if
* interrupt coalescing is active, then the interrupt may not
* happen immediately, but will wait until either a set number
- * of frames or amount of time have passed.). In NAPI, the
+ * of frames or amount of time have passed). In NAPI, the
* interrupt handler will signal there is work to be done, and
* exit. Without NAPI, the packet(s) will be handled
* immediately. Both methods will start at the last known empty
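
The comment above describes the usual buffer-descriptor-ring scheme. The following schematic shows the idea only; struct my_bd and the MY_BD_* flags are invented for illustration and are not gianfar's definitions:

#include <linux/types.h>

struct my_bd {
	u16 status;	/* MY_BD_EMPTY set => descriptor owned by hardware */
	u16 length;
	u32 bufptr;
};

#define MY_BD_EMPTY 0x8000
#define MY_BD_WRAP  0x2000	/* last descriptor in the ring: wrap to base */

/* Process completed RX descriptors, starting at the last known empty one. */
static int my_clean_rx(struct my_bd *base, struct my_bd **curp, int budget)
{
	struct my_bd *bd = *curp;
	int handled = 0;

	while (handled < budget && !(bd->status & MY_BD_EMPTY)) {
		/* ... hand bd->bufptr / bd->length up the stack ... */
		bd->status |= MY_BD_EMPTY;	/* return it to the hardware */
		bd = (bd->status & MY_BD_WRAP) ? base : bd + 1;
		handled++;
	}
	*curp = bd;
	return handled;	/* caller re-enables interrupts once the ring is drained */
}
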
@@ -75,6 +70,7 @@
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -97,9 +93,11 @@
#include <linux/version.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
#include "gianfar.h"
-#include "gianfar_phy.h"
+#include "gianfar_mii.h"
#define TX_TIMEOUT (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
@@ -113,9 +111,8 @@
#endif
const char gfar_driver_name[] = "Gianfar Ethernet";
-const char gfar_driver_version[] = "1.1";
+const char gfar_driver_version[] = "1.2";
-int startup_gfar(struct net_device *dev);
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
@@ -126,17 +123,13 @@ static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
-static irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-static void gfar_phy_change(void *data);
-static void gfar_phy_timer(unsigned long data);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct device *device);
static int gfar_remove(struct device *device);
-void free_skb_resources(struct gfar_private *priv);
+static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
@@ -144,7 +137,6 @@ static int gfar_poll(struct net_device *dev, int *budget);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
-static void gfar_phy_startup_timer(unsigned long data);
static void gfar_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
@@ -162,6 +154,9 @@ int gfar_uses_fcb(struct gfar_private *priv)
else
return 0;
}
+
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start */
static int gfar_probe(struct device *device)
{
u32 tempval;
@@ -175,7 +170,7 @@ static int gfar_probe(struct device *device)
einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
- if (einfo == NULL) {
+ if (NULL == einfo) {
printk(KERN_ERR "gfar %d: Missing additional data!\n",
pdev->id);
@@ -185,7 +180,7 @@ static int gfar_probe(struct device *device)
/* Create an ethernet device instance */
dev = alloc_etherdev(sizeof (*priv));
- if (dev == NULL)
+ if (NULL == dev)
return -ENOMEM;
priv = netdev_priv(dev);
@@ -207,20 +202,11 @@ static int gfar_probe(struct device *device)
priv->regs = (struct gfar *)
ioremap(r->start, sizeof (struct gfar));
- if (priv->regs == NULL) {
+ if (NULL == priv->regs) {
err = -ENOMEM;
goto regs_fail;
}
- /* Set the PHY base address */
- priv->phyregs = (struct gfar *)
- ioremap(einfo->phy_reg_addr, sizeof (struct gfar));
-
- if (priv->phyregs == NULL) {
- err = -ENOMEM;
- goto phy_regs_fail;
- }
-
spin_lock_init(&priv->lock);
dev_set_drvdata(device, dev);
@@ -386,12 +372,10 @@ static int gfar_probe(struct device *device)
return 0;
register_fail:
- iounmap((void *) priv->phyregs);
-phy_regs_fail:
iounmap((void *) priv->regs);
regs_fail:
free_netdev(dev);
- return -ENOMEM;
+ return err;
}
static int gfar_remove(struct device *device)
@@ -402,108 +386,41 @@ static int gfar_remove(struct device *device)
dev_set_drvdata(device, NULL);
iounmap((void *) priv->regs);
- iounmap((void *) priv->phyregs);
free_netdev(dev);
return 0;
}
-/* Configure the PHY for dev.
- * returns 0 if success. -1 if failure
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
*/
static int init_phy(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct phy_info *curphy;
- unsigned int timeout = PHY_INIT_TIMEOUT;
- struct gfar *phyregs = priv->phyregs;
- struct gfar_mii_info *mii_info;
- int err;
+ uint gigabit_support =
+ priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
+ SUPPORTED_1000baseT_Full : 0;
+ struct phy_device *phydev;
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
- mii_info = kmalloc(sizeof(struct gfar_mii_info),
- GFP_KERNEL);
-
- if(NULL == mii_info) {
- if (netif_msg_ifup(priv))
- printk(KERN_ERR "%s: Could not allocate mii_info\n",
- dev->name);
- return -ENOMEM;
- }
-
- mii_info->speed = SPEED_1000;
- mii_info->duplex = DUPLEX_FULL;
- mii_info->pause = 0;
- mii_info->link = 1;
-
- mii_info->advertising = (ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full);
- mii_info->autoneg = 1;
+ phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0);
- spin_lock_init(&mii_info->mdio_lock);
-
- mii_info->mii_id = priv->einfo->phyid;
-
- mii_info->dev = dev;
-
- mii_info->mdio_read = &read_phy_reg;
- mii_info->mdio_write = &write_phy_reg;
-
- priv->mii_info = mii_info;
-
- /* Reset the management interface */
- gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);
-
- /* Setup the MII Mgmt clock speed */
- gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);
-
- /* Wait until the bus is free */
- while ((gfar_read(&phyregs->miimind) & MIIMIND_BUSY) &&
- timeout--)
- cpu_relax();
-
- if(timeout <= 0) {
- printk(KERN_ERR "%s: The MII Bus is stuck!\n",
- dev->name);
- err = -1;
- goto bus_fail;
- }
-
- /* get info for this PHY */
- curphy = get_phy_info(priv->mii_info);
-
- if (curphy == NULL) {
- if (netif_msg_ifup(priv))
- printk(KERN_ERR "%s: No PHY found\n", dev->name);
- err = -1;
- goto no_phy;
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
}
- mii_info->phyinfo = curphy;
+ /* Remove any features not supported by the controller */
+ phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+ phydev->advertising = phydev->supported;
- /* Run the commands which initialize the PHY */
- if(curphy->init) {
- err = curphy->init(priv->mii_info);
-
- if (err)
- goto phy_init_fail;
- }
+ priv->phydev = phydev;
return 0;
-
-phy_init_fail:
-no_phy:
-bus_fail:
- kfree(mii_info);
-
- return err;
}
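
The rewritten init_phy() above follows the standard phylib attach sequence: connect to the PHY with a link-change callback, then trim the advertised modes to what the MAC supports. A condensed sketch of the same pattern, with my_* names and the mac_supported mask introduced purely for illustration:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/types.h>

/* Placeholder: would read phydev->link/speed/duplex and program the MAC. */
static void my_adjust_link(struct net_device *dev)
{
}

static int my_init_phy(struct net_device *dev, const char *bus_id,
		       u32 mac_supported)
{
	struct phy_device *phydev;

	/* Attach; phylib calls my_adjust_link() whenever link state changes. */
	phydev = phy_connect(dev, bus_id, &my_adjust_link, 0);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	/* Advertise only what both the PHY and this MAC can do. */
	phydev->supported &= mac_supported;
	phydev->advertising = phydev->supported;

	return 0;
}
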
static void init_registers(struct net_device *dev)
@@ -603,24 +520,13 @@ void stop_gfar(struct net_device *dev)
struct gfar *regs = priv->regs;
unsigned long flags;
+ phy_stop(priv->phydev);
+
/* Lock it down */
spin_lock_irqsave(&priv->lock, flags);
- /* Tell the kernel the link is down */
- priv->mii_info->link = 0;
- adjust_link(dev);
-
gfar_halt(dev);
- if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
- /* Clear any pending interrupts */
- mii_clear_phy_interrupt(priv->mii_info);
-
- /* Disable PHY Interrupts */
- mii_configure_phy_interrupt(priv->mii_info,
- MII_INTERRUPT_DISABLED);
- }
-
spin_unlock_irqrestore(&priv->lock, flags);
/* Free the IRQs */
@@ -629,13 +535,7 @@ void stop_gfar(struct net_device *dev)
free_irq(priv->interruptTransmit, dev);
free_irq(priv->interruptReceive, dev);
} else {
- free_irq(priv->interruptTransmit, dev);
- }
-
- if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
- free_irq(priv->einfo->interruptPHY, dev);
- } else {
- del_timer_sync(&priv->phy_info_timer);
+ free_irq(priv->interruptTransmit, dev);
}
free_skb_resources(priv);
@@ -649,7 +549,7 @@ void stop_gfar(struct net_device *dev)
/* If there are any tx skbs or rx skbs still around, free them.
* Then free tx_skbuff and rx_skbuff */
-void free_skb_resources(struct gfar_private *priv)
+static void free_skb_resources(struct gfar_private *priv)
{
struct rxbd8 *rxbdp;
struct txbd8 *txbdp;
@@ -770,7 +670,7 @@ int startup_gfar(struct net_device *dev)
(struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
priv->tx_ring_size, GFP_KERNEL);
- if (priv->tx_skbuff == NULL) {
+ if (NULL == priv->tx_skbuff) {
if (netif_msg_ifup(priv))
printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
dev->name);
@@ -785,7 +685,7 @@ int startup_gfar(struct net_device *dev)
(struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
priv->rx_ring_size, GFP_KERNEL);
- if (priv->rx_skbuff == NULL) {
+ if (NULL == priv->rx_skbuff) {
if (netif_msg_ifup(priv))
printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
dev->name);
@@ -879,13 +779,7 @@ int startup_gfar(struct net_device *dev)
}
}
- /* Set up the PHY change work queue */
- INIT_WORK(&priv->tq, gfar_phy_change, dev);
-
- init_timer(&priv->phy_info_timer);
- priv->phy_info_timer.function = &gfar_phy_startup_timer;
- priv->phy_info_timer.data = (unsigned long) priv->mii_info;
- mod_timer(&priv->phy_info_timer, jiffies + HZ);
+ phy_start(priv->phydev);
/* Configure the coalescing support */
if (priv->txcoalescing)
@@ -933,11 +827,6 @@ tx_skb_fail:
priv->tx_bd_base,
gfar_read(&regs->tbase0));
- if (priv->mii_info->phyinfo->close)
- priv->mii_info->phyinfo->close(priv->mii_info);
-
- kfree(priv->mii_info);
-
return err;
}
@@ -1035,7 +924,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbdp->status &= TXBD_WRAP;
/* Set up checksumming */
- if ((dev->features & NETIF_F_IP_CSUM)
+ if ((dev->features & NETIF_F_IP_CSUM)
&& (CHECKSUM_HW == skb->ip_summed)) {
fcb = gfar_add_fcb(skb, txbdp);
gfar_tx_checksum(skb, fcb);
@@ -1103,11 +992,9 @@ static int gfar_close(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
stop_gfar(dev);
- /* Shutdown the PHY */
- if (priv->mii_info->phyinfo->close)
- priv->mii_info->phyinfo->close(priv->mii_info);
-
- kfree(priv->mii_info);
+ /* Disconnect from the PHY */
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
netif_stop_queue(dev);
@@ -1343,7 +1230,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
while ((!skb) && timeout--)
skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
- if (skb == NULL)
+ if (NULL == skb)
return NULL;
/* We need the data buffer to be aligned properly. We will reserve
@@ -1490,7 +1377,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
struct gfar_private *priv = netdev_priv(dev);
struct rxfcb *fcb = NULL;
- if (skb == NULL) {
+ if (NULL == skb) {
if (netif_msg_rx_err(priv))
printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
priv->stats.rx_dropped++;
@@ -1718,131 +1605,9 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_HANDLED;
}
-static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct gfar_private *priv = netdev_priv(dev);
-
- /* Clear the interrupt */
- mii_clear_phy_interrupt(priv->mii_info);
-
- /* Disable PHY interrupts */
- mii_configure_phy_interrupt(priv->mii_info,
- MII_INTERRUPT_DISABLED);
-
- /* Schedule the phy change */
- schedule_work(&priv->tq);
-
- return IRQ_HANDLED;
-}
-
-/* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void gfar_phy_change(void *data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct gfar_private *priv = netdev_priv(dev);
- int result = 0;
-
- /* Delay to give the PHY a chance to change the
- * register state */
- msleep(1);
-
- /* Update the link, speed, duplex */
- result = priv->mii_info->phyinfo->read_status(priv->mii_info);
-
- /* Adjust the known status as long as the link
- * isn't still coming up */
- if((0 == result) || (priv->mii_info->link == 0))
- adjust_link(dev);
-
- /* Reenable interrupts, if needed */
- if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR)
- mii_configure_phy_interrupt(priv->mii_info,
- MII_INTERRUPT_ENABLED);
-}
-
-/* Called every so often on systems that don't interrupt
- * the core for PHY changes */
-static void gfar_phy_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct gfar_private *priv = netdev_priv(dev);
-
- schedule_work(&priv->tq);
-
- mod_timer(&priv->phy_info_timer, jiffies +
- GFAR_PHY_CHANGE_TIME * HZ);
-}
-
-/* Keep trying aneg for some time
- * If, after GFAR_AN_TIMEOUT seconds, it has not
- * finished, we switch to forced.
- * Either way, once the process has completed, we either
- * request the interrupt, or switch the timer over to
- * using gfar_phy_timer to check status */
-static void gfar_phy_startup_timer(unsigned long data)
-{
- int result;
- static int secondary = GFAR_AN_TIMEOUT;
- struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
- struct gfar_private *priv = netdev_priv(mii_info->dev);
-
- /* Configure the Auto-negotiation */
- result = mii_info->phyinfo->config_aneg(mii_info);
-
- /* If autonegotiation failed to start, and
- * we haven't timed out, reset the timer, and return */
- if (result && secondary--) {
- mod_timer(&priv->phy_info_timer, jiffies + HZ);
- return;
- } else if (result) {
- /* Couldn't start autonegotiation.
- * Try switching to forced */
- mii_info->autoneg = 0;
- result = mii_info->phyinfo->config_aneg(mii_info);
-
- /* Forcing failed! Give up */
- if(result) {
- if (netif_msg_link(priv))
- printk(KERN_ERR "%s: Forcing failed!\n",
- mii_info->dev->name);
- return;
- }
- }
-
- /* Kill the timer so it can be restarted */
- del_timer_sync(&priv->phy_info_timer);
-
- /* Grab the PHY interrupt, if necessary/possible */
- if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
- if (request_irq(priv->einfo->interruptPHY,
- phy_interrupt,
- SA_SHIRQ,
- "phy_interrupt",
- mii_info->dev) < 0) {
- if (netif_msg_intr(priv))
- printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
- mii_info->dev->name,
- priv->einfo->interruptPHY);
- } else {
- mii_configure_phy_interrupt(priv->mii_info,
- MII_INTERRUPT_ENABLED);
- return;
- }
- }
-
- /* Start the timer again, this time in order to
- * handle a change in status */
- init_timer(&priv->phy_info_timer);
- priv->phy_info_timer.function = &gfar_phy_timer;
- priv->phy_info_timer.data = (unsigned long) mii_info->dev;
- mod_timer(&priv->phy_info_timer, jiffies +
- GFAR_PHY_CHANGE_TIME * HZ);
-}
-
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
- * information through variables in the priv structure, and this
+ * information through variables in the phydev structure, and this
* function converts those variables into the appropriate
* register values, and can bring down the device if needed.
*/
@@ -1850,84 +1615,68 @@ static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar *regs = priv->regs;
- u32 tempval;
- struct gfar_mii_info *mii_info = priv->mii_info;
+ unsigned long flags;
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (phydev->link) {
+ u32 tempval = gfar_read(&regs->maccfg2);
- if (mii_info->link) {
/* Now we make sure that we can be in full duplex mode.
* If not, we operate in half-duplex mode. */
- if (mii_info->duplex != priv->oldduplex) {
- if (!(mii_info->duplex)) {
- tempval = gfar_read(&regs->maccfg2);
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
tempval &= ~(MACCFG2_FULL_DUPLEX);
- gfar_write(&regs->maccfg2, tempval);
-
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s: Half Duplex\n",
- dev->name);
- } else {
- tempval = gfar_read(&regs->maccfg2);
+ else
tempval |= MACCFG2_FULL_DUPLEX;
- gfar_write(&regs->maccfg2, tempval);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s: Full Duplex\n",
- dev->name);
- }
-
- priv->oldduplex = mii_info->duplex;
+ priv->oldduplex = phydev->duplex;
}
- if (mii_info->speed != priv->oldspeed) {
- switch (mii_info->speed) {
+ if (phydev->speed != priv->oldspeed) {
+ new_state = 1;
+ switch (phydev->speed) {
case 1000:
- tempval = gfar_read(&regs->maccfg2);
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
- gfar_write(&regs->maccfg2, tempval);
break;
case 100:
case 10:
- tempval = gfar_read(&regs->maccfg2);
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
- gfar_write(&regs->maccfg2, tempval);
break;
default:
if (netif_msg_link(priv))
printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not 10/100/1000!\n",
- dev->name, mii_info->speed);
+ "%s: Ack! Speed (%d) is not 10/100/1000!\n",
+ dev->name, phydev->speed);
break;
}
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
- mii_info->speed);
-
- priv->oldspeed = mii_info->speed;
+ priv->oldspeed = phydev->speed;
}
+ gfar_write(&regs->maccfg2, tempval);
+
if (!priv->oldlink) {
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s: Link is up\n", dev->name);
+ new_state = 1;
priv->oldlink = 1;
- netif_carrier_on(dev);
netif_schedule(dev);
}
- } else {
- if (priv->oldlink) {
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s: Link is down\n",
- dev->name);
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- netif_carrier_off(dev);
- }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
}
-}
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
/* Update the hash table based on the current list of multicast
* addresses we subscribe to. Also, change the promiscuity of
@@ -2122,12 +1871,23 @@ static struct device_driver gfar_driver = {
static int __init gfar_init(void)
{
- return driver_register(&gfar_driver);
+ int err = gfar_mdio_init();
+
+ if (err)
+ return err;
+
+ err = driver_register(&gfar_driver);
+
+ if (err)
+ gfar_mdio_exit();
+
+ return err;
}
static void __exit gfar_exit(void)
{
driver_unregister(&gfar_driver);
+ gfar_mdio_exit();
}
module_init(gfar_init);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 28af087d9fb..c77ca6c0d04 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -17,7 +17,6 @@
*
* Still left to do:
* -Add support for module parameters
- * -Add support for ethtool -s
* -Add patch for ethtool phys id
*/
#ifndef __GIANFAR_H
@@ -37,7 +36,8 @@
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
-#include <linux/fsl_devices.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -48,7 +48,8 @@
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
-#include "gianfar_phy.h"
+#include <linux/fsl_devices.h>
+#include "gianfar_mii.h"
/* The maximum number of packets to be handled in one call of gfar_poll */
#define GFAR_DEV_WEIGHT 64
@@ -73,7 +74,7 @@
#define PHY_INIT_TIMEOUT 100000
#define GFAR_PHY_CHANGE_TIME 2
-#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.1, "
+#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, "
#define DRV_NAME "gfar-enet"
extern const char gfar_driver_name[];
extern const char gfar_driver_version[];
@@ -578,12 +579,7 @@ struct gfar {
u32 hafdup; /* 0x.50c - Half Duplex Register */
u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
u8 res18[12];
- u32 miimcfg; /* 0x.520 - MII Management Configuration Register */
- u32 miimcom; /* 0x.524 - MII Management Command Register */
- u32 miimadd; /* 0x.528 - MII Management Address Register */
- u32 miimcon; /* 0x.52c - MII Management Control Register */
- u32 miimstat; /* 0x.530 - MII Management Status Register */
- u32 miimind; /* 0x.534 - MII Management Indicator Register */
+ u8 gfar_mii_regs[24]; /* See gianfar_phy.h */
u8 res19[4];
u32 ifstat; /* 0x.53c - Interface Status Register */
u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
@@ -688,9 +684,6 @@ struct gfar_private {
struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */
u32 *hash_regs[16];
int hash_width;
- struct gfar *phyregs;
- struct work_struct tq;
- struct timer_list phy_info_timer;
struct net_device_stats stats; /* linux network statistics */
struct gfar_extra_stats extra_stats;
spinlock_t lock;
@@ -710,7 +703,8 @@ struct gfar_private {
unsigned int interruptError;
struct gianfar_platform_data *einfo;
- struct gfar_mii_info *mii_info;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
int oldspeed;
int oldduplex;
int oldlink;
@@ -732,4 +726,12 @@ extern inline void gfar_write(volatile unsigned *addr, u32 val)
extern struct ethtool_ops *gfar_op_array[];
+extern irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
+extern int startup_gfar(struct net_device *dev);
+extern void stop_gfar(struct net_device *dev);
+extern void gfar_halt(struct net_device *dev);
+extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
+ int enable, u32 regnum, u32 read);
+void gfar_setup_stashing(struct net_device *dev);
+
#endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index a451de62919..68e3578e761 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -39,17 +39,18 @@
#include <asm/types.h>
#include <asm/uaccess.h>
#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
#include "gianfar.h"
#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
-extern int startup_gfar(struct net_device *dev);
-extern void stop_gfar(struct net_device *dev);
-extern void gfar_halt(struct net_device *dev);
extern void gfar_start(struct net_device *dev);
extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+#define GFAR_MAX_COAL_USECS 0xffff
+#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
u64 * buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
@@ -182,38 +183,32 @@ static void gfar_gdrvinfo(struct net_device *dev, struct
drvinfo->eedump_len = 0;
}
+
+static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (NULL == phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+
/* Return the current settings in the ethtool_cmd structure */
static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct gfar_private *priv = netdev_priv(dev);
- uint gigabit_support =
- priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- SUPPORTED_1000baseT_Full : 0;
- uint gigabit_advert =
- priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- ADVERTISED_1000baseT_Full: 0;
-
- cmd->supported = (SUPPORTED_10baseT_Half
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | gigabit_support | SUPPORTED_Autoneg);
-
- /* For now, we always advertise everything */
- cmd->advertising = (ADVERTISED_10baseT_Half
- | ADVERTISED_100baseT_Half
- | ADVERTISED_100baseT_Full
- | gigabit_advert | ADVERTISED_Autoneg);
-
- cmd->speed = priv->mii_info->speed;
- cmd->duplex = priv->mii_info->duplex;
- cmd->port = PORT_MII;
- cmd->phy_address = priv->mii_info->mii_id;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = AUTONEG_ENABLE;
+ struct phy_device *phydev = priv->phydev;
+
+ if (NULL == phydev)
+ return -ENODEV;
+
cmd->maxtxpkt = priv->txcount;
cmd->maxrxpkt = priv->rxcount;
- return 0;
+ return phy_ethtool_gset(phydev, cmd);
}
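With this conversion, querying and changing link settings is delegated entirely to phylib; the driver only contributes its own coalescing counters to the ethtool_cmd. A minimal sketch of the pattern for any phylib-backed driver (the foo_* names are illustrative, not from the patch):

static int foo_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct foo_private *priv = netdev_priv(dev);

	if (NULL == priv->phydev)
		return -ENODEV;

	/* phylib fills in speed, duplex, advertising, autoneg, port, ... */
	return phy_ethtool_gset(priv->phydev, cmd);
}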
/* Return the length of the register structure */
@@ -241,14 +236,14 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
unsigned int count;
/* The timer is different, depending on the interface speed */
- switch (priv->mii_info->speed) {
- case 1000:
+ switch (priv->phydev->speed) {
+ case SPEED_1000:
count = GFAR_GBIT_TIME;
break;
- case 100:
+ case SPEED_100:
count = GFAR_100_TIME;
break;
- case 10:
+ case SPEED_10:
default:
count = GFAR_10_TIME;
break;
@@ -265,14 +260,14 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
unsigned int count;
/* The timer is different, depending on the interface speed */
- switch (priv->mii_info->speed) {
- case 1000:
+ switch (priv->phydev->speed) {
+ case SPEED_1000:
count = GFAR_GBIT_TIME;
break;
- case 100:
+ case SPEED_100:
count = GFAR_100_TIME;
break;
- case 10:
+ case SPEED_10:
default:
count = GFAR_10_TIME;
break;
@@ -292,6 +287,9 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime);
cvals->rx_max_coalesced_frames = priv->rxcount;
@@ -348,6 +346,22 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
else
priv->rxcoalescing = 1;
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+ /* Check the bounds of the values */
+ if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ pr_info("Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ pr_info("Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs);
priv->rxcount = cvals->rx_max_coalesced_frames;
@@ -358,6 +372,19 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
else
priv->txcoalescing = 1;
+ /* Check the bounds of the values */
+ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ pr_info("Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ pr_info("Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs);
priv->txcount = cvals->tx_max_coalesced_frames;
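The coalescing path now rejects out-of-range values and refuses to run without an attached PHY, since the usecs-to-ticks conversion depends on the negotiated speed. A condensed sketch of the flow the patch implements for both the rx and tx sides (the helper name is illustrative; the limits and gfar_usecs2ticks() are from the patch):

static int foo_apply_rx_coalesce(struct gfar_private *priv,
				 struct ethtool_coalesce *cvals)
{
	if (NULL == priv->phydev)
		return -ENODEV;		/* conversion needs the link speed */

	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS ||
	    cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES)
		return -EINVAL;

	priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs);
	priv->rxcount = cvals->rx_max_coalesced_frames;
	return 0;
}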
@@ -536,6 +563,7 @@ static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
struct ethtool_ops gfar_ethtool_ops = {
.get_settings = gfar_gsettings,
+ .set_settings = gfar_ssettings,
.get_drvinfo = gfar_gdrvinfo,
.get_regs_len = gfar_reglen,
.get_regs = gfar_get_regs,
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
new file mode 100644
index 00000000000..1eca1dbca7f
--- /dev/null
+++ b/drivers/net/gianfar_mii.c
@@ -0,0 +1,219 @@
+/*
+ * drivers/net/gianfar_mii.c
+ *
+ * Gianfar Ethernet Driver -- MIIM bus implementation
+ * Provides Bus interface for MIIM regs
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <asm/ocp.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "gianfar.h"
+#include "gianfar_mii.h"
+
+/* Write value to the PHY at mii_id at register regnum,
+ * on the bus, waiting until the write is done before returning.
+ * All PHY configuration is done through the TSEC1 MIIM regs */
+int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
+{
+ struct gfar_mii *regs = bus->priv;
+
+ /* Set the PHY address and the register address we want to write */
+ gfar_write(&regs->miimadd, (mii_id << 8) | regnum);
+
+ /* Write out the value we want */
+ gfar_write(&regs->miimcon, value);
+
+ /* Wait for the transaction to finish */
+ while (gfar_read(&regs->miimind) & MIIMIND_BUSY)
+ cpu_relax();
+
+ return 0;
+}
+
+/* Read the bus for PHY at addr mii_id, register regnum, and
+ * return the value. Clears miimcom first. All PHY
+ * configuration has to be done through the TSEC1 MIIM regs */
+int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct gfar_mii *regs = bus->priv;
+ u16 value;
+
+ /* Set the PHY address and the register address we want to read */
+ gfar_write(&regs->miimadd, (mii_id << 8) | regnum);
+
+ /* Clear miimcom, and then initiate a read */
+ gfar_write(&regs->miimcom, 0);
+ gfar_write(&regs->miimcom, MII_READ_COMMAND);
+
+ /* Wait for the transaction to finish */
+ while (gfar_read(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
+ cpu_relax();
+
+ /* Grab the value of the register from miimstat */
+ value = gfar_read(&regs->miimstat);
+
+ return value;
+}
+
+
+/* Reset the MIIM registers, and wait for the bus to free */
+int gfar_mdio_reset(struct mii_bus *bus)
+{
+ struct gfar_mii *regs = bus->priv;
+ unsigned int timeout = PHY_INIT_TIMEOUT;
+
+ spin_lock_bh(&bus->mdio_lock);
+
+ /* Reset the management interface */
+ gfar_write(&regs->miimcfg, MIIMCFG_RESET);
+
+ /* Setup the MII Mgmt clock speed */
+ gfar_write(&regs->miimcfg, MIIMCFG_INIT_VALUE);
+
+ /* Wait until the bus is free */
+ while ((gfar_read(&regs->miimind) & MIIMIND_BUSY) &&
+ --timeout)
+ cpu_relax();
+
+ spin_unlock_bh(&bus->mdio_lock);
+
+ if (timeout == 0) {
+ printk(KERN_ERR "%s: The MII Bus is stuck!\n",
+ bus->name);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+
+int gfar_mdio_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gianfar_mdio_data *pdata;
+ struct gfar_mii *regs;
+ struct mii_bus *new_bus;
+ int err = 0;
+
+ if (NULL == dev)
+ return -EINVAL;
+
+ new_bus = kmalloc(sizeof(struct mii_bus), GFP_KERNEL);
+
+ if (NULL == new_bus)
+ return -ENOMEM;
+
+ new_bus->name = "Gianfar MII Bus";
+ new_bus->read = &gfar_mdio_read;
+ new_bus->write = &gfar_mdio_write;
+ new_bus->reset = &gfar_mdio_reset;
+ new_bus->id = pdev->id;
+
+ pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data;
+
+ if (NULL == pdata) {
+ printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id);
+ return -ENODEV;
+ }
+
+ /* Set the PHY base address */
+ regs = (struct gfar_mii *) ioremap(pdata->paddr,
+ sizeof (struct gfar_mii));
+
+ if (NULL == regs) {
+ err = -ENOMEM;
+ goto reg_map_fail;
+ }
+
+ new_bus->priv = regs;
+
+ new_bus->irq = pdata->irq;
+
+ new_bus->dev = dev;
+ dev_set_drvdata(dev, new_bus);
+
+ err = mdiobus_register(new_bus);
+
+ if (0 != err) {
+ printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
+ new_bus->name);
+ goto bus_register_fail;
+ }
+
+ return 0;
+
+bus_register_fail:
+ iounmap((void *) regs);
+reg_map_fail:
+ kfree(new_bus);
+
+ return err;
+}
+
+
+int gfar_mdio_remove(struct device *dev)
+{
+ struct mii_bus *bus = dev_get_drvdata(dev);
+
+ mdiobus_unregister(bus);
+
+ dev_set_drvdata(dev, NULL);
+
+ iounmap((void *) bus->priv);
+ bus->priv = NULL;
+ kfree(bus);
+
+ return 0;
+}
+
+static struct device_driver gianfar_mdio_driver = {
+ .name = "fsl-gianfar_mdio",
+ .bus = &platform_bus_type,
+ .probe = gfar_mdio_probe,
+ .remove = gfar_mdio_remove,
+};
+
+int __init gfar_mdio_init(void)
+{
+ return driver_register(&gianfar_mdio_driver);
+}
+
+void __exit gfar_mdio_exit(void)
+{
+ driver_unregister(&gianfar_mdio_driver);
+}
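The new file registers the TSEC MIIM block as a generic mii_bus, so PHY access goes through the bus accessors rather than driver-private read/write helpers. A minimal sketch of a raw register read through such a bus (the function name is illustrative; real callers serialize on bus->mdio_lock, as gfar_mdio_reset() does above):

static int foo_read_bmsr(struct mii_bus *bus, int phy_addr)
{
	/* dispatches to gfar_mdio_read() for the bus registered above */
	return bus->read(bus, phy_addr, MII_BMSR);
}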
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
new file mode 100644
index 00000000000..56e5665d5c9
--- /dev/null
+++ b/drivers/net/gianfar_mii.h
@@ -0,0 +1,45 @@
+/*
+ * drivers/net/gianfar_mii.h
+ *
+ * Gianfar Ethernet Driver -- MII Management Bus Implementation
+ * Driver for the MDIO bus controller in the Gianfar register space
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ *
+ * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __GIANFAR_MII_H
+#define __GIANFAR_MII_H
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+
+#define MII_READ_COMMAND 0x00000001
+
+#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg \
+ | SUPPORTED_MII)
+
+struct gfar_mii {
+ u32 miimcfg; /* 0x.520 - MII Management Config Register */
+ u32 miimcom; /* 0x.524 - MII Management Command Register */
+ u32 miimadd; /* 0x.528 - MII Management Address Register */
+ u32 miimcon; /* 0x.52c - MII Management Control Register */
+ u32 miimstat; /* 0x.530 - MII Management Status Register */
+ u32 miimind; /* 0x.534 - MII Management Indicator Register */
+};
+
+int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
+int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
+int __init gfar_mdio_init(void);
+void __exit gfar_mdio_exit(void);
+#endif /* __GIANFAR_MII_H */
diff --git a/drivers/net/gianfar_phy.c b/drivers/net/gianfar_phy.c
deleted file mode 100644
index 7c965f268a8..00000000000
--- a/drivers/net/gianfar_phy.c
+++ /dev/null
@@ -1,661 +0,0 @@
-/*
- * drivers/net/gianfar_phy.c
- *
- * Gianfar Ethernet Driver -- PHY handling
- * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (kumar.gala@freescale.com)
- *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/crc32.h>
-#include <linux/mii.h>
-
-#include "gianfar.h"
-#include "gianfar_phy.h"
-
-static void config_genmii_advert(struct gfar_mii_info *mii_info);
-static void genmii_setup_forced(struct gfar_mii_info *mii_info);
-static void genmii_restart_aneg(struct gfar_mii_info *mii_info);
-static int gbit_config_aneg(struct gfar_mii_info *mii_info);
-static int genmii_config_aneg(struct gfar_mii_info *mii_info);
-static int genmii_update_link(struct gfar_mii_info *mii_info);
-static int genmii_read_status(struct gfar_mii_info *mii_info);
-u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum);
-void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val);
-
-/* Write value to the PHY for this device to the register at regnum, */
-/* waiting until the write is done before it returns. All PHY */
-/* configuration has to be done through the TSEC1 MIIM regs */
-void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar *regbase = priv->phyregs;
-
- /* Set the PHY address and the register address we want to write */
- gfar_write(&regbase->miimadd, (mii_id << 8) | regnum);
-
- /* Write out the value we want */
- gfar_write(&regbase->miimcon, value);
-
- /* Wait for the transaction to finish */
- while (gfar_read(&regbase->miimind) & MIIMIND_BUSY)
- cpu_relax();
-}
-
-/* Reads from register regnum in the PHY for device dev, */
-/* returning the value. Clears miimcom first. All PHY */
-/* configuration has to be done through the TSEC1 MIIM regs */
-int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar *regbase = priv->phyregs;
- u16 value;
-
- /* Set the PHY address and the register address we want to read */
- gfar_write(&regbase->miimadd, (mii_id << 8) | regnum);
-
- /* Clear miimcom, and then initiate a read */
- gfar_write(&regbase->miimcom, 0);
- gfar_write(&regbase->miimcom, MII_READ_COMMAND);
-
- /* Wait for the transaction to finish */
- while (gfar_read(&regbase->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
- cpu_relax();
-
- /* Grab the value of the register from miimstat */
- value = gfar_read(&regbase->miimstat);
-
- return value;
-}
-
-void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info)
-{
- if(mii_info->phyinfo->ack_interrupt)
- mii_info->phyinfo->ack_interrupt(mii_info);
-}
-
-
-void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts)
-{
- mii_info->interrupts = interrupts;
- if(mii_info->phyinfo->config_intr)
- mii_info->phyinfo->config_intr(mii_info);
-}
-
-
-/* Writes MII_ADVERTISE with the appropriate values, after
- * sanitizing advertise to make sure only supported features
- * are advertised
- */
-static void config_genmii_advert(struct gfar_mii_info *mii_info)
-{
- u32 advertise;
- u16 adv;
-
- /* Only allow advertising what this PHY supports */
- mii_info->advertising &= mii_info->phyinfo->features;
- advertise = mii_info->advertising;
-
- /* Setup standard advertisement */
- adv = phy_read(mii_info, MII_ADVERTISE);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
- if (advertise & ADVERTISED_10baseT_Half)
- adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- adv |= ADVERTISE_100FULL;
- phy_write(mii_info, MII_ADVERTISE, adv);
-}
-
-static void genmii_setup_forced(struct gfar_mii_info *mii_info)
-{
- u16 ctrl;
- u32 features = mii_info->phyinfo->features;
-
- ctrl = phy_read(mii_info, MII_BMCR);
-
- ctrl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPEED1000|BMCR_ANENABLE);
- ctrl |= BMCR_RESET;
-
- switch(mii_info->speed) {
- case SPEED_1000:
- if(features & (SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full)) {
- ctrl |= BMCR_SPEED1000;
- break;
- }
- mii_info->speed = SPEED_100;
- case SPEED_100:
- if (features & (SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full)) {
- ctrl |= BMCR_SPEED100;
- break;
- }
- mii_info->speed = SPEED_10;
- case SPEED_10:
- if (features & (SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full))
- break;
- default: /* Unsupported speed! */
- printk(KERN_ERR "%s: Bad speed!\n",
- mii_info->dev->name);
- break;
- }
-
- phy_write(mii_info, MII_BMCR, ctrl);
-}
-
-
-/* Enable and Restart Autonegotiation */
-static void genmii_restart_aneg(struct gfar_mii_info *mii_info)
-{
- u16 ctl;
-
- ctl = phy_read(mii_info, MII_BMCR);
- ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
- phy_write(mii_info, MII_BMCR, ctl);
-}
-
-
-static int gbit_config_aneg(struct gfar_mii_info *mii_info)
-{
- u16 adv;
- u32 advertise;
-
- if(mii_info->autoneg) {
- /* Configure the ADVERTISE register */
- config_genmii_advert(mii_info);
- advertise = mii_info->advertising;
-
- adv = phy_read(mii_info, MII_1000BASETCONTROL);
- adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
- MII_1000BASETCONTROL_HALFDUPLEXCAP);
- if (advertise & SUPPORTED_1000baseT_Half)
- adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
- if (advertise & SUPPORTED_1000baseT_Full)
- adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
- phy_write(mii_info, MII_1000BASETCONTROL, adv);
-
- /* Start/Restart aneg */
- genmii_restart_aneg(mii_info);
- } else
- genmii_setup_forced(mii_info);
-
- return 0;
-}
-
-static int marvell_config_aneg(struct gfar_mii_info *mii_info)
-{
- /* The Marvell PHY has an errata which requires
- * that certain registers get written in order
- * to restart autonegotiation */
- phy_write(mii_info, MII_BMCR, BMCR_RESET);
-
- phy_write(mii_info, 0x1d, 0x1f);
- phy_write(mii_info, 0x1e, 0x200c);
- phy_write(mii_info, 0x1d, 0x5);
- phy_write(mii_info, 0x1e, 0);
- phy_write(mii_info, 0x1e, 0x100);
-
- gbit_config_aneg(mii_info);
-
- return 0;
-}
-static int genmii_config_aneg(struct gfar_mii_info *mii_info)
-{
- if (mii_info->autoneg) {
- config_genmii_advert(mii_info);
- genmii_restart_aneg(mii_info);
- } else
- genmii_setup_forced(mii_info);
-
- return 0;
-}
-
-
-static int genmii_update_link(struct gfar_mii_info *mii_info)
-{
- u16 status;
-
- /* Do a fake read */
- phy_read(mii_info, MII_BMSR);
-
- /* Read link and autonegotiation status */
- status = phy_read(mii_info, MII_BMSR);
- if ((status & BMSR_LSTATUS) == 0)
- mii_info->link = 0;
- else
- mii_info->link = 1;
-
- /* If we are autonegotiating, and not done,
- * return an error */
- if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
- return -EAGAIN;
-
- return 0;
-}
-
-static int genmii_read_status(struct gfar_mii_info *mii_info)
-{
- u16 status;
- int err;
-
- /* Update the link, but return if there
- * was an error */
- err = genmii_update_link(mii_info);
- if (err)
- return err;
-
- if (mii_info->autoneg) {
- status = phy_read(mii_info, MII_LPA);
-
- if (status & (LPA_10FULL | LPA_100FULL))
- mii_info->duplex = DUPLEX_FULL;
- else
- mii_info->duplex = DUPLEX_HALF;
- if (status & (LPA_100FULL | LPA_100HALF))
- mii_info->speed = SPEED_100;
- else
- mii_info->speed = SPEED_10;
- mii_info->pause = 0;
- }
- /* On non-aneg, we assume what we put in BMCR is the speed,
- * though magic-aneg shouldn't prevent this case from occurring
- */
-
- return 0;
-}
-static int marvell_read_status(struct gfar_mii_info *mii_info)
-{
- u16 status;
- int err;
-
- /* Update the link, but return if there
- * was an error */
- err = genmii_update_link(mii_info);
- if (err)
- return err;
-
- /* If the link is up, read the speed and duplex */
- /* If we aren't autonegotiating, assume speeds
- * are as set */
- if (mii_info->autoneg && mii_info->link) {
- int speed;
- status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
-
-#if 0
- /* If speed and duplex aren't resolved,
- * return an error. Isn't this handled
- * by checking aneg?
- */
- if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0)
- return -EAGAIN;
-#endif
-
- /* Get the duplexity */
- if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
- mii_info->duplex = DUPLEX_FULL;
- else
- mii_info->duplex = DUPLEX_HALF;
-
- /* Get the speed */
- speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
- switch(speed) {
- case MII_M1011_PHY_SPEC_STATUS_1000:
- mii_info->speed = SPEED_1000;
- break;
- case MII_M1011_PHY_SPEC_STATUS_100:
- mii_info->speed = SPEED_100;
- break;
- default:
- mii_info->speed = SPEED_10;
- break;
- }
- mii_info->pause = 0;
- }
-
- return 0;
-}
-
-
-static int cis820x_read_status(struct gfar_mii_info *mii_info)
-{
- u16 status;
- int err;
-
- /* Update the link, but return if there
- * was an error */
- err = genmii_update_link(mii_info);
- if (err)
- return err;
-
- /* If the link is up, read the speed and duplex */
- /* If we aren't autonegotiating, assume speeds
- * are as set */
- if (mii_info->autoneg && mii_info->link) {
- int speed;
-
- status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
- if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
- mii_info->duplex = DUPLEX_FULL;
- else
- mii_info->duplex = DUPLEX_HALF;
-
- speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
-
- switch (speed) {
- case MII_CIS8201_AUXCONSTAT_GBIT:
- mii_info->speed = SPEED_1000;
- break;
- case MII_CIS8201_AUXCONSTAT_100:
- mii_info->speed = SPEED_100;
- break;
- default:
- mii_info->speed = SPEED_10;
- break;
- }
- }
-
- return 0;
-}
-
-static int marvell_ack_interrupt(struct gfar_mii_info *mii_info)
-{
- /* Clear the interrupts by reading the reg */
- phy_read(mii_info, MII_M1011_IEVENT);
-
- return 0;
-}
-
-static int marvell_config_intr(struct gfar_mii_info *mii_info)
-{
- if(mii_info->interrupts == MII_INTERRUPT_ENABLED)
- phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
- else
- phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
-
- return 0;
-}
-
-static int cis820x_init(struct gfar_mii_info *mii_info)
-{
- phy_write(mii_info, MII_CIS8201_AUX_CONSTAT,
- MII_CIS8201_AUXCONSTAT_INIT);
- phy_write(mii_info, MII_CIS8201_EXT_CON1,
- MII_CIS8201_EXTCON1_INIT);
-
- return 0;
-}
-
-static int cis820x_ack_interrupt(struct gfar_mii_info *mii_info)
-{
- phy_read(mii_info, MII_CIS8201_ISTAT);
-
- return 0;
-}
-
-static int cis820x_config_intr(struct gfar_mii_info *mii_info)
-{
- if(mii_info->interrupts == MII_INTERRUPT_ENABLED)
- phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
- else
- phy_write(mii_info, MII_CIS8201_IMASK, 0);
-
- return 0;
-}
-
-#define DM9161_DELAY 10
-
-static int dm9161_read_status(struct gfar_mii_info *mii_info)
-{
- u16 status;
- int err;
-
- /* Update the link, but return if there
- * was an error */
- err = genmii_update_link(mii_info);
- if (err)
- return err;
-
- /* If the link is up, read the speed and duplex */
- /* If we aren't autonegotiating, assume speeds
- * are as set */
- if (mii_info->autoneg && mii_info->link) {
- status = phy_read(mii_info, MII_DM9161_SCSR);
- if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
- mii_info->speed = SPEED_100;
- else
- mii_info->speed = SPEED_10;
-
- if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
- mii_info->duplex = DUPLEX_FULL;
- else
- mii_info->duplex = DUPLEX_HALF;
- }
-
- return 0;
-}
-
-
-static int dm9161_config_aneg(struct gfar_mii_info *mii_info)
-{
- struct dm9161_private *priv = mii_info->priv;
-
- if(0 == priv->resetdone)
- return -EAGAIN;
-
- return 0;
-}
-
-static void dm9161_timer(unsigned long data)
-{
- struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
- struct dm9161_private *priv = mii_info->priv;
- u16 status = phy_read(mii_info, MII_BMSR);
-
- if (status & BMSR_ANEGCOMPLETE) {
- priv->resetdone = 1;
- } else
- mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
-}
-
-static int dm9161_init(struct gfar_mii_info *mii_info)
-{
- struct dm9161_private *priv;
-
- /* Allocate the private data structure */
- priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
-
- if (NULL == priv)
- return -ENOMEM;
-
- mii_info->priv = priv;
-
- /* Reset is not done yet */
- priv->resetdone = 0;
-
- /* Isolate the PHY */
- phy_write(mii_info, MII_BMCR, BMCR_ISOLATE);
-
- /* Do not bypass the scrambler/descrambler */
- phy_write(mii_info, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
-
- /* Clear 10BTCSR to default */
- phy_write(mii_info, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
-
- /* Reconnect the PHY, and enable Autonegotiation */
- phy_write(mii_info, MII_BMCR, BMCR_ANENABLE);
-
- /* Start a timer for DM9161_DELAY seconds to wait
- * for the PHY to be ready */
- init_timer(&priv->timer);
- priv->timer.function = &dm9161_timer;
- priv->timer.data = (unsigned long) mii_info;
- mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
-
- return 0;
-}
-
-static void dm9161_close(struct gfar_mii_info *mii_info)
-{
- struct dm9161_private *priv = mii_info->priv;
-
- del_timer_sync(&priv->timer);
- kfree(priv);
-}
-
-#if 0
-static int dm9161_ack_interrupt(struct gfar_mii_info *mii_info)
-{
- phy_read(mii_info, MII_DM9161_INTR);
-
- return 0;
-}
-#endif
-
-/* Cicada 820x */
-static struct phy_info phy_info_cis820x = {
- 0x000fc440,
- "Cicada Cis8204",
- 0x000fffc0,
- .features = MII_GBIT_FEATURES,
- .init = &cis820x_init,
- .config_aneg = &gbit_config_aneg,
- .read_status = &cis820x_read_status,
- .ack_interrupt = &cis820x_ack_interrupt,
- .config_intr = &cis820x_config_intr,
-};
-
-static struct phy_info phy_info_dm9161 = {
- .phy_id = 0x0181b880,
- .name = "Davicom DM9161E",
- .phy_id_mask = 0x0ffffff0,
- .init = dm9161_init,
- .config_aneg = dm9161_config_aneg,
- .read_status = dm9161_read_status,
- .close = dm9161_close,
-};
-
-static struct phy_info phy_info_marvell = {
- .phy_id = 0x01410c00,
- .phy_id_mask = 0xffffff00,
- .name = "Marvell 88E1101/88E1111",
- .features = MII_GBIT_FEATURES,
- .config_aneg = &marvell_config_aneg,
- .read_status = &marvell_read_status,
- .ack_interrupt = &marvell_ack_interrupt,
- .config_intr = &marvell_config_intr,
-};
-
-static struct phy_info phy_info_genmii= {
- .phy_id = 0x00000000,
- .phy_id_mask = 0x00000000,
- .name = "Generic MII",
- .features = MII_BASIC_FEATURES,
- .config_aneg = genmii_config_aneg,
- .read_status = genmii_read_status,
-};
-
-static struct phy_info *phy_info[] = {
- &phy_info_cis820x,
- &phy_info_marvell,
- &phy_info_dm9161,
- &phy_info_genmii,
- NULL
-};
-
-u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum)
-{
- u16 retval;
- unsigned long flags;
-
- spin_lock_irqsave(&mii_info->mdio_lock, flags);
- retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
- spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
-
- return retval;
-}
-
-void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mii_info->mdio_lock, flags);
- mii_info->mdio_write(mii_info->dev,
- mii_info->mii_id,
- regnum, val);
- spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
-}
-
-/* Use the PHY ID registers to determine what type of PHY is attached
- * to device dev. return a struct phy_info structure describing that PHY
- */
-struct phy_info * get_phy_info(struct gfar_mii_info *mii_info)
-{
- u16 phy_reg;
- u32 phy_ID;
- int i;
- struct phy_info *theInfo = NULL;
- struct net_device *dev = mii_info->dev;
-
- /* Grab the bits from PHYIR1, and put them in the upper half */
- phy_reg = phy_read(mii_info, MII_PHYSID1);
- phy_ID = (phy_reg & 0xffff) << 16;
-
- /* Grab the bits from PHYIR2, and put them in the lower half */
- phy_reg = phy_read(mii_info, MII_PHYSID2);
- phy_ID |= (phy_reg & 0xffff);
-
- /* loop through all the known PHY types, and find one that */
- /* matches the ID we read from the PHY. */
- for (i = 0; phy_info[i]; i++)
- if (phy_info[i]->phy_id ==
- (phy_ID & phy_info[i]->phy_id_mask)) {
- theInfo = phy_info[i];
- break;
- }
-
- /* This shouldn't happen, as we have generic PHY support */
- if (theInfo == NULL) {
- printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID);
- return NULL;
- } else {
- printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name,
- phy_ID);
- }
-
- return theInfo;
-}
diff --git a/drivers/net/gianfar_phy.h b/drivers/net/gianfar_phy.h
deleted file mode 100644
index 1e9b3abf1e6..00000000000
--- a/drivers/net/gianfar_phy.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * drivers/net/gianfar_phy.h
- *
- * Gianfar Ethernet Driver -- PHY handling
- * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (kumar.gala@freescale.com)
- *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#ifndef __GIANFAR_PHY_H
-#define __GIANFAR_PHY_H
-
-#define MII_end ((u32)-2)
-#define MII_read ((u32)-1)
-
-#define MIIMIND_BUSY 0x00000001
-#define MIIMIND_NOTVALID 0x00000004
-
-#define GFAR_AN_TIMEOUT 2000
-
-/* 1000BT control (Marvell & BCM54xx at least) */
-#define MII_1000BASETCONTROL 0x09
-#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
-#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
-
-/* Cicada Extended Control Register 1 */
-#define MII_CIS8201_EXT_CON1 0x17
-#define MII_CIS8201_EXTCON1_INIT 0x0000
-
-/* Cicada Interrupt Mask Register */
-#define MII_CIS8201_IMASK 0x19
-#define MII_CIS8201_IMASK_IEN 0x8000
-#define MII_CIS8201_IMASK_SPEED 0x4000
-#define MII_CIS8201_IMASK_LINK 0x2000
-#define MII_CIS8201_IMASK_DUPLEX 0x1000
-#define MII_CIS8201_IMASK_MASK 0xf000
-
-/* Cicada Interrupt Status Register */
-#define MII_CIS8201_ISTAT 0x1a
-#define MII_CIS8201_ISTAT_STATUS 0x8000
-#define MII_CIS8201_ISTAT_SPEED 0x4000
-#define MII_CIS8201_ISTAT_LINK 0x2000
-#define MII_CIS8201_ISTAT_DUPLEX 0x1000
-
-/* Cicada Auxiliary Control/Status Register */
-#define MII_CIS8201_AUX_CONSTAT 0x1c
-#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
-#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
-#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
-#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
-#define MII_CIS8201_AUXCONSTAT_100 0x0008
-
-/* 88E1011 PHY Status Register */
-#define MII_M1011_PHY_SPEC_STATUS 0x11
-#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
-#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
-#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
-#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
-#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
-#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
-
-#define MII_M1011_IEVENT 0x13
-#define MII_M1011_IEVENT_CLEAR 0x0000
-
-#define MII_M1011_IMASK 0x12
-#define MII_M1011_IMASK_INIT 0x6400
-#define MII_M1011_IMASK_CLEAR 0x0000
-
-#define MII_DM9161_SCR 0x10
-#define MII_DM9161_SCR_INIT 0x0610
-
-/* DM9161 Specified Configuration and Status Register */
-#define MII_DM9161_SCSR 0x11
-#define MII_DM9161_SCSR_100F 0x8000
-#define MII_DM9161_SCSR_100H 0x4000
-#define MII_DM9161_SCSR_10F 0x2000
-#define MII_DM9161_SCSR_10H 0x1000
-
-/* DM9161 Interrupt Register */
-#define MII_DM9161_INTR 0x15
-#define MII_DM9161_INTR_PEND 0x8000
-#define MII_DM9161_INTR_DPLX_MASK 0x0800
-#define MII_DM9161_INTR_SPD_MASK 0x0400
-#define MII_DM9161_INTR_LINK_MASK 0x0200
-#define MII_DM9161_INTR_MASK 0x0100
-#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
-#define MII_DM9161_INTR_SPD_CHANGE 0x0008
-#define MII_DM9161_INTR_LINK_CHANGE 0x0004
-#define MII_DM9161_INTR_INIT 0x0000
-#define MII_DM9161_INTR_STOP \
-(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
- | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
-
-/* DM9161 10BT Configuration/Status */
-#define MII_DM9161_10BTCSR 0x12
-#define MII_DM9161_10BTCSR_INIT 0x7800
-
-#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
- SUPPORTED_10baseT_Full | \
- SUPPORTED_100baseT_Half | \
- SUPPORTED_100baseT_Full | \
- SUPPORTED_Autoneg | \
- SUPPORTED_TP | \
- SUPPORTED_MII)
-
-#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
- SUPPORTED_1000baseT_Half | \
- SUPPORTED_1000baseT_Full)
-
-#define MII_READ_COMMAND 0x00000001
-
-#define MII_INTERRUPT_DISABLED 0x0
-#define MII_INTERRUPT_ENABLED 0x1
-/* Taken from mii_if_info and sungem_phy.h */
-struct gfar_mii_info {
- /* Information about the PHY type */
- /* And management functions */
- struct phy_info *phyinfo;
-
- /* forced speed & duplex (no autoneg)
- * partner speed & duplex & pause (autoneg)
- */
- int speed;
- int duplex;
- int pause;
-
- /* The most recently read link state */
- int link;
-
- /* Enabled Interrupts */
- u32 interrupts;
-
- u32 advertising;
- int autoneg;
- int mii_id;
-
- /* private data pointer */
- /* For use by PHYs to maintain extra state */
- void *priv;
-
- /* Provided by host chip */
- struct net_device *dev;
-
- /* A lock to ensure that only one thing can read/write
- * the MDIO bus at a time */
- spinlock_t mdio_lock;
-
- /* Provided by ethernet driver */
- int (*mdio_read) (struct net_device *dev, int mii_id, int reg);
- void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val);
-};
-
-/* struct phy_info: a structure which defines attributes for a PHY
- *
- * id will contain a number which represents the PHY. During
- * startup, the driver will poll the PHY to find out what its
- * UID--as defined by registers 2 and 3--is. The 32-bit result
- * gotten from the PHY will be ANDed with phy_id_mask to
- * discard any bits which may change based on revision numbers
- * unimportant to functionality
- *
- * There are 6 commands which take a gfar_mii_info structure.
- * Each PHY must declare config_aneg, and read_status.
- */
-struct phy_info {
- u32 phy_id;
- char *name;
- unsigned int phy_id_mask;
- u32 features;
-
- /* Called to initialize the PHY */
- int (*init)(struct gfar_mii_info *mii_info);
-
- /* Called to suspend the PHY for power */
- int (*suspend)(struct gfar_mii_info *mii_info);
-
- /* Reconfigures autonegotiation (or disables it) */
- int (*config_aneg)(struct gfar_mii_info *mii_info);
-
- /* Determines the negotiated speed and duplex */
- int (*read_status)(struct gfar_mii_info *mii_info);
-
- /* Clears any pending interrupts */
- int (*ack_interrupt)(struct gfar_mii_info *mii_info);
-
- /* Enables or disables interrupts */
- int (*config_intr)(struct gfar_mii_info *mii_info);
-
- /* Clears up any memory if needed */
- void (*close)(struct gfar_mii_info *mii_info);
-};
-
-struct phy_info *get_phy_info(struct gfar_mii_info *mii_info);
-int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
-void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
-void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info);
-void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts);
-
-struct dm9161_private {
- struct timer_list timer;
- int resetdone;
-};
-
-#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index de087cd609d..896aa02000d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -1,6 +1,7 @@
config MKISS
tristate "Serial port KISS driver"
depends on AX25
+ select CRC16
---help---
KISS is a protocol used for the exchange of data between a computer
and a Terminal Node Controller (a small embedded system commonly
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 1756f0ed54c..cb43a9d2877 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -144,7 +144,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
{
struct bpqdev *bpq;
- list_for_each_entry(bpq, &bpq_devices, bpq_list) {
+ list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
if (bpq->ethdev == dev)
return bpq->axdev;
}
@@ -399,7 +399,7 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- list_for_each_entry(bpqdev, &bpq_devices, bpq_list) {
+ list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) {
if (i == *pos)
return bpqdev;
}
@@ -418,7 +418,7 @@ static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
p = ((struct bpqdev *)v)->bpq_list.next;
return (p == &bpq_devices) ? NULL
- : list_entry(p, struct bpqdev, bpq_list);
+ : rcu_dereference(list_entry(p, struct bpqdev, bpq_list));
}
static void bpq_seq_stop(struct seq_file *seq, void *v)
@@ -561,8 +561,6 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
if (!dev_is_ethdev(dev))
return NOTIFY_DONE;
- rcu_read_lock();
-
switch (event) {
case NETDEV_UP: /* new ethernet device -> new BPQ interface */
if (bpq_get_ax25_dev(dev) == NULL)
@@ -581,7 +579,6 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
default:
break;
}
- rcu_read_unlock();
return NOTIFY_DONE;
}
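The bpqether changes switch the read-side list walks to the RCU list primitives and drop the explicit rcu_read_lock()/unlock() pair from the netdevice notifier, which relies on its calling context instead. For reference, the usual RCU read-side pattern for this kind of lookup looks like the following sketch (generic, not taken from the patch):

struct bpqdev *bpq, *found = NULL;

rcu_read_lock();
list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
	if (bpq->ethdev == dev) {
		found = bpq;
		break;
	}
}
rcu_read_unlock();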
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index d9fe64b46f4..85d6dc005be 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -14,13 +14,14 @@
*
* Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
* Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
+ * Copyright (C) 2004, 05 Thomas Osterried DL9SAU <thomas@x-berg.in-berlin.de>
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
+#include <linux/crc16.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
@@ -39,11 +40,6 @@
#include <net/ax25.h>
-#ifdef CONFIG_INET
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#endif
-
#define AX_MTU 236
/* SLIP/KISS protocol characters. */
@@ -80,9 +76,13 @@ struct mkiss {
int mode;
int crcmode; /* MW: for FlexNet, SMACK etc. */
-#define CRC_MODE_NONE 0
-#define CRC_MODE_FLEX 1
-#define CRC_MODE_SMACK 2
+ int crcauto; /* CRC auto mode */
+
+#define CRC_MODE_NONE 0
+#define CRC_MODE_FLEX 1
+#define CRC_MODE_SMACK 2
+#define CRC_MODE_FLEX_TEST 3
+#define CRC_MODE_SMACK_TEST 4
atomic_t refcnt;
struct semaphore dead_sem;
@@ -151,6 +151,21 @@ static int check_crc_flex(unsigned char *cp, int size)
return 0;
}
+static int check_crc_16(unsigned char *cp, int size)
+{
+ unsigned short crc = 0x0000;
+
+ if (size < 3)
+ return -1;
+
+ crc = crc16(0, cp, size);
+
+ if (crc != 0x0000)
+ return -1;
+
+ return 0;
+}
+
/*
* Standard encapsulation
*/
@@ -237,19 +252,42 @@ static void ax_bump(struct mkiss *ax)
spin_lock_bh(&ax->buflock);
if (ax->rbuff[0] > 0x0f) {
- if (ax->rbuff[0] & 0x20) {
- ax->crcmode = CRC_MODE_FLEX;
+ if (ax->rbuff[0] & 0x80) {
+ if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
+ ax->stats.rx_errors++;
+ spin_unlock_bh(&ax->buflock);
+
+ return;
+ }
+ if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
+ printk(KERN_INFO
+ "mkiss: %s: Switching to crc-smack\n",
+ ax->dev->name);
+ ax->crcmode = CRC_MODE_SMACK;
+ }
+ ax->rcount -= 2;
+ *ax->rbuff &= ~0x80;
+ } else if (ax->rbuff[0] & 0x20) {
if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
- ax->stats.rx_errors++;
+ ax->stats.rx_errors++;
+ spin_unlock_bh(&ax->buflock);
return;
}
+ if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
+ printk(KERN_INFO
+ "mkiss: %s: Switching to crc-flexnet\n",
+ ax->dev->name);
+ ax->crcmode = CRC_MODE_FLEX;
+ }
ax->rcount -= 2;
- /* dl9sau bugfix: the trailling two bytes flexnet crc
- * will not be passed to the kernel. thus we have
- * to correct the kissparm signature, because it
- * indicates a crc but there's none
+
+ /*
+ * dl9sau bugfix: the trailing two bytes flexnet crc
+ * will not be passed to the kernel. thus we have to
+ * correct the kissparm signature, because it indicates
+ * a crc but there's none
*/
- *ax->rbuff &= ~0x20;
+ *ax->rbuff &= ~0x20;
}
}
spin_unlock_bh(&ax->buflock);
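On receive, the driver now sniffs the KISS command byte to auto-detect the CRC flavour: bit 0x80 marks a SMACK frame checked with the kernel CRC16, bit 0x20 a FlexNet frame, and in either case the two trailing CRC bytes are stripped and the marker bit cleared before the frame is passed up. Condensed from the hunk above (locking and mode switching omitted), the SMACK check is:

/* crc16() over the whole frame including the trailing CRC yields 0
 * when the frame is intact -- that is what check_crc_16() tests. */
if (ax->rbuff[0] & 0x80) {
	if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
		ax->stats.rx_errors++;	/* bad CRC, drop the frame */
		return;
	}
	ax->rcount -= 2;		/* strip the CRC bytes */
	*ax->rbuff &= ~0x80;		/* hide the SMACK marker */
}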
@@ -417,20 +455,69 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
p = icp;
spin_lock_bh(&ax->buflock);
- switch (ax->crcmode) {
- unsigned short crc;
+ if ((*p & 0x0f) != 0) {
+ /* Configuration Command (kissparms(1)).
+ * Protocol spec says: never append CRC.
+ * This fixes a very old bug in the linux
+ * kiss driver. -- dl9sau */
+ switch (*p & 0xff) {
+ case 0x85:
+ /* command from userspace especially for us,
+ * not for delivery to the tnc */
+ if (len > 1) {
+ int cmd = (p[1] & 0xff);
+ switch(cmd) {
+ case 3:
+ ax->crcmode = CRC_MODE_SMACK;
+ break;
+ case 2:
+ ax->crcmode = CRC_MODE_FLEX;
+ break;
+ case 1:
+ ax->crcmode = CRC_MODE_NONE;
+ break;
+ case 0:
+ default:
+ ax->crcmode = CRC_MODE_SMACK_TEST;
+ cmd = 0;
+ }
+ ax->crcauto = (cmd ? 0 : 1);
+ printk(KERN_INFO "mkiss: %s: crc mode %s %d\n", ax->dev->name, (len) ? "set to" : "is", cmd);
+ }
+ spin_unlock_bh(&ax->buflock);
+ netif_start_queue(dev);
- case CRC_MODE_FLEX:
- *p |= 0x20;
- crc = calc_crc_flex(p, len);
- count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
- break;
+ return;
+ default:
+ count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ }
+ } else {
+ unsigned short crc;
+ switch (ax->crcmode) {
+ case CRC_MODE_SMACK_TEST:
+ ax->crcmode = CRC_MODE_FLEX_TEST;
+ printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name);
+ // fall through
+ case CRC_MODE_SMACK:
+ *p |= 0x80;
+ crc = swab16(crc16(0, p, len));
+ count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ break;
+ case CRC_MODE_FLEX_TEST:
+ ax->crcmode = CRC_MODE_NONE;
+ printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name);
+ // fall through
+ case CRC_MODE_FLEX:
+ *p |= 0x20;
+ crc = calc_crc_flex(p, len);
+ count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ break;
+
+ default:
+ count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ }
+ }
- default:
- count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
- break;
- }
-
set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
ax->stats.tx_packets++;
@@ -439,8 +526,6 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
ax->dev->trans_start = jiffies;
ax->xleft = count - actual;
ax->xhead = ax->xbuff + actual;
-
- spin_unlock_bh(&ax->buflock);
}
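On transmit the same marker bits are applied in the other direction, and KISS configuration frames (non-zero low nibble of the command byte) never get a CRC; command 0x85 is consumed locally to switch the CRC mode at runtime. The crc16() routine comes from lib/crc16, pulled in by the new "select CRC16" in the hamradio Kconfig hunk above. A minimal sketch of the SMACK transmit framing, using the names from the patch with buffer management omitted:

unsigned short crc;
int count;

*p |= 0x80;					/* flag the frame as SMACK */
crc = swab16(crc16(0, p, len));			/* CRC over marker byte + payload */
count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len + 2);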
/* Encapsulate an AX.25 packet and kick it into a TTY queue. */
@@ -622,7 +707,7 @@ static void ax_setup(struct net_device *dev)
* best way to fix this is to use a rwlock in the tty struct, but for now we
* use a single global rwlock for all ttys in ppp line discipline.
*/
-static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(disc_data_lock);
static struct mkiss *mkiss_get(struct tty_struct *tty)
{
@@ -643,6 +728,8 @@ static void mkiss_put(struct mkiss *ax)
up(&ax->dead_sem);
}
+static int crc_force = 0; /* Can be overridden with insmod */
+
static int mkiss_open(struct tty_struct *tty)
{
struct net_device *dev;
@@ -682,6 +769,33 @@ static int mkiss_open(struct tty_struct *tty)
if (register_netdev(dev))
goto out_free_buffers;
+ /* after register_netdev() - because else printk smashes the kernel */
+ switch (crc_force) {
+ case 3:
+ ax->crcmode = CRC_MODE_SMACK;
+ printk(KERN_INFO "mkiss: %s: crc mode smack forced.\n",
+ ax->dev->name);
+ break;
+ case 2:
+ ax->crcmode = CRC_MODE_FLEX;
+ printk(KERN_INFO "mkiss: %s: crc mode flexnet forced.\n",
+ ax->dev->name);
+ break;
+ case 1:
+ ax->crcmode = CRC_MODE_NONE;
+ printk(KERN_INFO "mkiss: %s: crc mode disabled.\n",
+ ax->dev->name);
+ break;
+ case 0:
+ /* fall through */
+ default:
+ crc_force = 0;
+ printk(KERN_INFO "mkiss: %s: crc mode is auto.\n",
+ ax->dev->name);
+ ax->crcmode = CRC_MODE_SMACK_TEST;
+ }
+ ax->crcauto = (crc_force ? 0 : 1);
+
netif_start_queue(dev);
/* Done. We have linked the TTY line to a channel. */
@@ -765,7 +879,6 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
case SIOCSIFHWADDR: {
char addr[AX25_ADDR_LEN];
-printk(KERN_INFO "In SIOCSIFHWADDR");
if (copy_from_user(&addr,
(void __user *) arg, AX25_ADDR_LEN)) {
@@ -864,6 +977,7 @@ out:
}
static struct tty_ldisc ax_ldisc = {
+ .owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
.name = "mkiss",
.open = mkiss_open,
@@ -904,6 +1018,8 @@ static void __exit mkiss_exit_driver(void)
MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>");
MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
+MODULE_PARM(crc_force, "i");
+MODULE_PARM_DESC(crc_force, "crc [0 = auto | 1 = none | 2 = flexnet | 3 = smack]");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_AX25);
diff --git a/drivers/net/hamradio/mkiss.h b/drivers/net/hamradio/mkiss.h
deleted file mode 100644
index 4ab70047859..00000000000
--- a/drivers/net/hamradio/mkiss.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/****************************************************************************
- * Defines for the Multi-KISS driver.
- ****************************************************************************/
-
-#define AX25_MAXDEV 16 /* MAX number of AX25 channels;
- This can be overridden with
- insmod -oax25_maxdev=nnn */
-#define AX_MTU 236
-
-/* SLIP/KISS protocol characters. */
-#define END 0300 /* indicates end of frame */
-#define ESC 0333 /* indicates byte stuffing */
-#define ESC_END 0334 /* ESC ESC_END means END 'data' */
-#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
-
-struct ax_disp {
- int magic;
-
- /* Various fields. */
- struct tty_struct *tty; /* ptr to TTY structure */
- struct net_device *dev; /* easy for intr handling */
-
- /* These are pointers to the malloc()ed frame buffers. */
- unsigned char *rbuff; /* receiver buffer */
- int rcount; /* received chars counter */
- unsigned char *xbuff; /* transmitter buffer */
- unsigned char *xhead; /* pointer to next byte to XMIT */
- int xleft; /* bytes left in XMIT queue */
-
- /* SLIP interface statistics. */
- unsigned long rx_packets; /* inbound frames counter */
- unsigned long tx_packets; /* outbound frames counter */
- unsigned long rx_bytes; /* inbound bytes counter */
- unsigned long tx_bytes; /* outbound bytes counter */
- unsigned long rx_errors; /* Parity, etc. errors */
- unsigned long tx_errors; /* Planned stuff */
- unsigned long rx_dropped; /* No memory for skb */
- unsigned long tx_dropped; /* When MTU change */
- unsigned long rx_over_errors; /* Frame bigger then SLIP buf. */
-
- /* Detailed SLIP statistics. */
- int mtu; /* Our mtu (to spot changes!) */
- int buffsize; /* Max buffers sizes */
-
-
- unsigned long flags; /* Flag values/ mode etc */
- /* long req'd: used by set_bit --RR */
-#define AXF_INUSE 0 /* Channel in use */
-#define AXF_ESCAPE 1 /* ESC received */
-#define AXF_ERROR 2 /* Parity, etc. error */
-#define AXF_KEEPTEST 3 /* Keepalive test flag */
-#define AXF_OUTWAIT 4 /* is outpacket was flag */
-
- int mode;
- int crcmode; /* MW: for FlexNet, SMACK etc. */
-#define CRC_MODE_NONE 0
-#define CRC_MODE_FLEX 1
-#define CRC_MODE_SMACK 2
- spinlock_t buflock; /* lock for rbuf and xbuf */
-};
-
-#define AX25_MAGIC 0x5316
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index cf0ac6fda1a..b71fab6e34f 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -2517,10 +2517,8 @@ static int hp100_down_vg_link(struct net_device *dev)
do {
if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
break;
- if (!in_interrupt()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
} while (time_after(time, jiffies));
if (time_after_eq(jiffies, time)) /* no signal->no logout */
@@ -2536,10 +2534,8 @@ static int hp100_down_vg_link(struct net_device *dev)
do {
if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
break;
- if (!in_interrupt()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
} while (time_after(time, jiffies));
#ifdef HP100_DEBUG
@@ -2577,10 +2573,8 @@ static int hp100_down_vg_link(struct net_device *dev)
do {
if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST))
break;
- if (!in_interrupt()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
} while (time_after(time, jiffies));
hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */
@@ -2591,10 +2585,8 @@ static int hp100_down_vg_link(struct net_device *dev)
do {
if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0)
break;
- if (!in_interrupt()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
} while (time_after(time, jiffies));
if (time_before_eq(time, jiffies)) {
@@ -2606,10 +2598,8 @@ static int hp100_down_vg_link(struct net_device *dev)
time = jiffies + (2 * HZ); /* This seems to take a while.... */
do {
- if (!in_interrupt()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
} while (time_after(time, jiffies));
return 0;
@@ -2659,10 +2649,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
do {
if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
break;
- if (!in_interrupt()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
} while (time_after(time, jiffies));
/* Start an addressed training and optionally request promiscuous port */
@@ -2697,10 +2685,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
do {
if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
break;
- if (!in_interrupt()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
} while (time_before(jiffies, time));
if (time_after_eq(jiffies, time)) {
@@ -2723,10 +2709,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
#endif
break;
}
- if (!in_interrupt()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
} while (time_after(time, jiffies));
}
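The hp100 hunks (and the stir4200/ixgb ones below) replace the open-coded set_current_state(TASK_INTERRUPTIBLE) + schedule_timeout() pairs with the schedule_timeout_interruptible() helper. The two forms are equivalent; as a sketch of what the conversion relies on (the wrapper name here is illustrative, its body mirrors what the helper does):

static inline signed long foo_sleep(signed long ticks)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(ticks);
}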
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 15f207323d9..3961a754e92 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -678,10 +678,9 @@ static void turnaround_delay(const struct stir_cb *stir, long us)
return;
ticks = us / (1000000 / HZ);
- if (ticks > 0) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1 + ticks);
- } else
+ if (ticks > 0)
+ schedule_timeout_interruptible(1 + ticks);
+ else
udelay(us);
}
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 9d026ed77dd..04e47189d83 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -645,11 +645,10 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data)
mod_timer(&adapter->blink_timer, jiffies);
- set_current_state(TASK_INTERRUPTIBLE);
- if(data)
- schedule_timeout(data * HZ);
+ if (data)
+ schedule_timeout_interruptible(data * HZ);
else
- schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+ schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
del_timer_sync(&adapter->blink_timer);
ixgb_led_off(&adapter->hw);
@@ -723,6 +722,7 @@ struct ethtool_ops ixgb_ethtool_ops = {
.phys_id = ixgb_phys_id,
.get_stats_count = ixgb_get_stats_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 89d6d69be38..176680cb153 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -460,8 +460,9 @@ ixgb_probe(struct pci_dev *pdev,
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
- if(!is_valid_ether_addr(netdev->dev_addr)) {
+ if(!is_valid_ether_addr(netdev->perm_addr)) {
err = -EIO;
goto err_eeprom;
}
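ixgb now records the EEPROM-derived MAC as the device's permanent address, validates that rather than the changeable dev_addr, and exposes it through the generic ethtool_op_get_perm_addr helper. A condensed sketch of the pattern for a driver adopting it (foo_read_mac_from_eeprom() is hypothetical; only the ethtool hook name is real):

foo_read_mac_from_eeprom(hw, netdev->dev_addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

if (!is_valid_ether_addr(netdev->perm_addr))
	return -EIO;

/* and in the driver's ethtool_ops:
 *	.get_perm_addr = ethtool_op_get_perm_addr,
 */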
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index b4929beb33b..1d75ca0bb93 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -298,7 +298,7 @@ enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_
static unsigned char lance_need_isa_bounce_buffers = 1;
static int lance_open(struct net_device *dev);
-static void lance_init_ring(struct net_device *dev, int mode);
+static void lance_init_ring(struct net_device *dev, gfp_t mode);
static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int lance_rx(struct net_device *dev);
static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
@@ -846,7 +846,7 @@ lance_purge_ring(struct net_device *dev)
/* Initialize the LANCE Rx and Tx rings. */
static void
-lance_init_ring(struct net_device *dev, int gfp)
+lance_init_ring(struct net_device *dev, gfp_t gfp)
{
struct lance_private *lp = dev->priv;
int i;
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 27f0d8ac4c4..309d254842c 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -298,7 +298,7 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
return 0;
unmap:
if (ei_status.reg0)
- iounmap((void *)dev->mem_start);
+ iounmap(ei_status.mem);
cleanup:
free_irq(dev->irq, dev);
return ret;
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index c33cb3dc942..e42aa797f08 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -207,6 +207,20 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
return 0;
}
+int mii_check_gmii_support(struct mii_if_info *mii)
+{
+ int reg;
+
+ reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+ if (reg & BMSR_ESTATEN) {
+ reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS);
+ if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF))
+ return 1;
+ }
+
+ return 0;
+}
+
int mii_link_ok (struct mii_if_info *mii)
{
/* first, a dummy read, needed to latch some MII phys */
@@ -394,5 +408,6 @@ EXPORT_SYMBOL(mii_ethtool_gset);
EXPORT_SYMBOL(mii_ethtool_sset);
EXPORT_SYMBOL(mii_check_link);
EXPORT_SYMBOL(mii_check_media);
+EXPORT_SYMBOL(mii_check_gmii_support);
EXPORT_SYMBOL(generic_mii_ioctl);
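mii_check_gmii_support() gives mii-lib users a portable way to ask whether the PHY reports 1000BASE-T ability in the extended status register, instead of hard-coding per-chip checks. A minimal sketch of how a driver might use it when building its ethtool capability mask (priv->mii is assumed to be the driver's struct mii_if_info; not code from the patch):

cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		 SUPPORTED_Autoneg | SUPPORTED_MII;

if (mii_check_gmii_support(&priv->mii))
	cmd->supported |= SUPPORTED_1000baseT_Half |
			  SUPPORTED_1000baseT_Full;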
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
new file mode 100644
index 00000000000..f79f7ee72ab
--- /dev/null
+++ b/drivers/net/mipsnet.c
@@ -0,0 +1,371 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#define DEBUG
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <asm/io.h>
+#include <asm/mips-boards/simint.h>
+
+#include "mipsnet.h" /* actual device IO mapping */
+
+#define MIPSNET_VERSION "2005-06-20"
+
+#define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field))
+
+struct mipsnet_priv {
+ struct net_device_stats stats;
+};
+
+static struct platform_device *mips_plat_dev;
+
+static char mipsnet_string[] = "mipsnet";
+
+/*
+ * Copy data from the MIPSNET rx data port
+ */
+static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
+ int len)
+{
+ uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
+ if (available_len < len)
+ return -EFAULT;
+
+ for (; len > 0; len--, kdata++) {
+ *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer));
+ }
+
+ return inl(mipsnet_reg_address(dev, rxDataCount));
+}
+
+static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ int count_to_go = skb->len;
+ char *buf_ptr = skb->data;
+ struct mipsnet_priv *mp = netdev_priv(dev);
+
+ pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n",
+ dev->name, __FUNCTION__, skb->len);
+
+ outl(skb->len, mipsnet_reg_address(dev, txDataCount));
+
+ pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n",
+ dev->name, __FUNCTION__, skb->len);
+
+ for (; count_to_go; buf_ptr++, count_to_go--) {
+ outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
+ }
+
+ mp->stats.tx_packets++;
+ mp->stats.tx_bytes += skb->len;
+
+ return skb->len;
+}
+
+static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ pr_debug("%s:%s(): transmitting %d bytes\n",
+ dev->name, __FUNCTION__, skb->len);
+
+ /* Only one packet at a time. Once TXDONE interrupt is serviced, the
+ * queue will be restarted.
+ */
+ netif_stop_queue(dev);
+ mipsnet_put_todevice(dev, skb);
+
+ return 0;
+}
+
+static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
+{
+ struct sk_buff *skb;
+ size_t len = count;
+ struct mipsnet_priv *mp = netdev_priv(dev);
+
+ if (!(skb = alloc_skb(len + 2, GFP_ATOMIC))) {
+ mp->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, 2);
+ if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
+ return -EFAULT;
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ pr_debug("%s:%s(): pushing RXed data to kernel\n",
+ dev->name, __FUNCTION__);
+ netif_rx(skb);
+
+ mp->stats.rx_packets++;
+ mp->stats.rx_bytes += len;
+
+ return count;
+}
+
+static irqreturn_t
+mipsnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+
+ irqreturn_t retval = IRQ_NONE;
+ uint64_t interruptFlags;
+
+ if (irq == dev->irq) {
+ pr_debug("%s:%s(): irq %d for device\n",
+ dev->name, __FUNCTION__, irq);
+
+ retval = IRQ_HANDLED;
+
+ interruptFlags =
+ inl(mipsnet_reg_address(dev, interruptControl));
+ pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name,
+ __FUNCTION__, interruptFlags);
+
+ if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
+ pr_debug("%s:%s(): got TXDone\n",
+ dev->name, __FUNCTION__);
+ outl(MIPSNET_INTCTL_TXDONE,
+ mipsnet_reg_address(dev, interruptControl));
+ // only one packet at a time, we are done.
+ netif_wake_queue(dev);
+ } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
+ pr_debug("%s:%s(): got RX data\n",
+ dev->name, __FUNCTION__);
+ mipsnet_get_fromdev(dev,
+ inl(mipsnet_reg_address(dev, rxDataCount)));
+ pr_debug("%s:%s(): clearing RX int\n",
+ dev->name, __FUNCTION__);
+ outl(MIPSNET_INTCTL_RXDONE,
+ mipsnet_reg_address(dev, interruptControl));
+
+ } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
+ pr_debug("%s:%s(): got test interrupt\n",
+ dev->name, __FUNCTION__);
+ // TESTBIT is cleared on read.
+ // And takes effect after a write with 0
+ outl(0, mipsnet_reg_address(dev, interruptControl));
+ } else {
+ pr_debug("%s:%s(): no valid flags 0x%016llx\n",
+ dev->name, __FUNCTION__, interruptFlags);
+ // Maybe shared IRQ, just ignore, no clearing.
+ retval = IRQ_NONE;
+ }
+
+ } else {
+ printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
+ dev->name, __FUNCTION__, irq);
+ retval = IRQ_NONE;
+ }
+ return retval;
+} //mipsnet_interrupt()
+
+static int mipsnet_open(struct net_device *dev)
+{
+ int err;
+ pr_debug("%s: mipsnet_open\n", dev->name);
+
+ err = request_irq(dev->irq, &mipsnet_interrupt,
+ SA_SHIRQ, dev->name, (void *) dev);
+
+ if (err) {
+ pr_debug("%s: %s(): can't get irq %d\n",
+ dev->name, __FUNCTION__, dev->irq);
+ release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+ return err;
+ }
+
+ pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n",
+ dev->name, __FUNCTION__, dev->base_addr, dev->irq);
+
+
+ netif_start_queue(dev);
+
+ // test interrupt handler
+ outl(MIPSNET_INTCTL_TESTBIT,
+ mipsnet_reg_address(dev, interruptControl));
+
+
+ return 0;
+}
+
+static int mipsnet_close(struct net_device *dev)
+{
+ pr_debug("%s: %s()\n", dev->name, __FUNCTION__);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static struct net_device_stats *mipsnet_get_stats(struct net_device *dev)
+{
+ struct mipsnet_priv *mp = netdev_priv(dev);
+
+ return &mp->stats;
+}
+
+static void mipsnet_set_mclist(struct net_device *dev)
+{
+ // we don't do anything
+ return;
+}
+
+static int __init mipsnet_probe(struct device *dev)
+{
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev(sizeof(struct mipsnet_priv));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dev_set_drvdata(dev, netdev);
+
+ netdev->open = mipsnet_open;
+ netdev->stop = mipsnet_close;
+ netdev->hard_start_xmit = mipsnet_xmit;
+ netdev->get_stats = mipsnet_get_stats;
+ netdev->set_multicast_list = mipsnet_set_mclist;
+
+ /*
+ * TODO: probe for these or load them from PARAM
+ */
+ netdev->base_addr = 0x4200;
+ netdev->irq = MIPSCPU_INT_BASE + MIPSCPU_INT_MB0 +
+ inl(mipsnet_reg_address(netdev, interruptInfo));
+
+ // Get the io region now, get irq on open()
+ if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
+ pr_debug("%s: %s(): IO region {start: 0x%04lx, len: %d} "
+ "for dev is not available.\n", netdev->name,
+ __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT);
+ err = -EBUSY;
+ goto out_free_netdev;
+ }
+
+ /*
+ * Lacking any better mechanism to allocate a MAC address we use a
+ * random one ...
+ */
+ random_ether_addr(netdev->dev_addr);
+
+ err = register_netdev(netdev);
+ if (err) {
+ printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
+ goto out_free_region;
+ }
+
+ return 0;
+
+out_free_region:
+ release_region(netdev->base_addr, MIPSNET_IO_EXTENT);
+
+out_free_netdev:
+ free_netdev(netdev);
+
+out:
+ return err;
+}
+
+static int __devexit mipsnet_device_remove(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+ free_netdev(dev);
+ dev_set_drvdata(device, NULL);
+
+ return 0;
+}
+
+static struct device_driver mipsnet_driver = {
+ .name = mipsnet_string,
+ .bus = &platform_bus_type,
+ .probe = mipsnet_probe,
+ .remove = __devexit_p(mipsnet_device_remove),
+};
+
+static void mipsnet_platform_release(struct device *device)
+{
+ struct platform_device *pldev;
+
+ /* free device */
+ pldev = to_platform_device(device);
+ kfree(pldev);
+}
+
+static int __init mipsnet_init_module(void)
+{
+ struct platform_device *pldev;
+ int err;
+
+ printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
+ "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
+
+ if (driver_register(&mipsnet_driver)) {
+ printk(KERN_ERR "Driver registration failed\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto out_unregister_driver;
+ }
+
+ memset (pldev, 0, sizeof (*pldev));
+ pldev->name = mipsnet_string;
+ pldev->id = 0;
+ pldev->dev.release = mipsnet_platform_release;
+
+ if (platform_device_register(pldev)) {
+ err = -ENODEV;
+ goto out_free_pldev;
+ }
+
+ if (!pldev->dev.driver) {
+ /*
+ * The driver was not bound to this device, there was
+ * no hardware at this address. Unregister it, as the
+ * release function will take care of freeing the
+ * allocated structure
+ */
+ platform_device_unregister (pldev);
+ }
+
+ mips_plat_dev = pldev;
+
+ return 0;
+
+out_free_pldev:
+ kfree(pldev);
+
+out_unregister_driver:
+ driver_unregister(&mipsnet_driver);
+out:
+ return err;
+}
+
+static void __exit mipsnet_exit_module(void)
+{
+ pr_debug("MIPSNet Ethernet driver exiting\n");
+
+ driver_unregister(&mipsnet_driver);
+}
+
+module_init(mipsnet_init_module);
+module_exit(mipsnet_exit_module);
diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h
new file mode 100644
index 00000000000..878535953cb
--- /dev/null
+++ b/drivers/net/mipsnet.h
@@ -0,0 +1,127 @@
+//
+// <COPYRIGHT CLASS="1B" YEAR="2005">
+// Unpublished work (c) MIPS Technologies, Inc. All rights reserved.
+// Unpublished rights reserved under the copyright laws of the U.S.A. and
+// other countries.
+//
+// PROPRIETARY / SECRET CONFIDENTIAL INFORMATION OF MIPS TECHNOLOGIES, INC.
+// FOR INTERNAL USE ONLY.
+//
+// Under no circumstances (contract or otherwise) may this information be
+// disclosed to, or copied, modified or used by anyone other than employees
+// or contractors of MIPS Technologies having a need to know.
+// </COPYRIGHT>
+//
+//++
+// File: MIPS_Net.h
+//
+// Description:
+// The definition of the emulated MIPSNET device's interface.
+//
+// Notes: This include file needs to work from a Linux device driver.
+//
+//--
+//
+
+#ifndef __MIPSNET_H
+#define __MIPSNET_H
+
+/*
+ * Id of this Net device, as seen by the core.
+ */
+#define MIPS_NET_DEV_ID ((uint64_t) \
+ ((uint64_t)'M'<< 0)| \
+ ((uint64_t)'I'<< 8)| \
+ ((uint64_t)'P'<<16)| \
+ ((uint64_t)'S'<<24)| \
+ ((uint64_t)'N'<<32)| \
+ ((uint64_t)'E'<<40)| \
+ ((uint64_t)'T'<<48)| \
+ ((uint64_t)'0'<<56))
+
+/*
+ * Net status/control block as seen by sw in the core.
+ * (Why not use bit fields? can't be bothered with cross-platform struct
+ * packing.)
+ */
+typedef struct _net_control_block {
+ /// dev info for probing
+ /// reads as MIPSNET%d where %d is some form of version
+ uint64_t devId; /*0x00 */
+
+ /*
+ * read only busy flag.
+ * Set and cleared by the Net Device to indicate that an rx or a tx
+ * is in progress.
+ */
+ uint32_t busy; /*0x08 */
+
+ /*
+ * Set by the Net Device.
+ * The device will set it once data has been received.
+ * The value is the number of bytes that should be read from
+ * rxDataBuffer. The value decreases towards 0 as data is read
+ * from rxDataBuffer, reaching 0 once all of it has been read.
+ */
+ uint32_t rxDataCount; /*0x0c */
+#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
+
+ /*
+ * Settable from the MIPS core, cleared by the Net Device.
+ * The core should set the number of bytes it wants to send,
+ * then it should write those bytes of data to txDataBuffer.
+ * The device will clear txDataCount once the data has been
+ * processed (not necessarily sent).
+ */
+ uint32_t txDataCount; /*0x10 */
+
+ /*
+ * Interrupt control
+ *
+ * Used to clear the interrupt generated by this dev.
+ * Write a 1 to clear the interrupt (except bit31).
+ *
+ * Bit0 is set if it was a tx-done interrupt.
+ * Bit1 is set when new rx-data is available.
+ * Until this bit is cleared there will be no other RXs.
+ *
+ * Bit31 is used for testing; it clears after a read.
+ * Writing 1 to this bit will cause an interrupt to be generated.
+ * To clear the test interrupt, write 0 to this register.
+ */
+ uint32_t interruptControl; /*0x14 */
+#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1<< 0))
+#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1<< 1))
+#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1<<31))
+#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE|MIPSNET_INTCTL_RXDONE|MIPSNET_INTCTL_TESTBIT)
+
+ /*
+ * Readonly core-specific interrupt info for the device to signal the core.
+ * The meaning of the contents of this field might change.
+ */
+ /*###\todo: the whole memIntf interrupt scheme is messy: the device should have
+ * no control whatsoever over which VPE/register set is being used.
+ * The MemIntf should only expose interrupt lines, and something in the
+ * config should be responsible for the line<->core/vpe bindings.
+ */
+ uint32_t interruptInfo; /*0x18 */
+
+ /*
+ * This is where the received data is read out.
+ * There is more data to read until rxDataCount is 0.
+ * Only 1 byte at this register offset is used.
+ */
+ uint32_t rxDataBuffer; /*0x1c */
+
+ /*
+ * This is where the data to transmit is written.
+ * Data should be written for the amount specified in the txDataCount register.
+ * Only 1 byte at this register offset is used.
+ */
+ uint32_t txDataBuffer; /*0x20 */
+} MIPS_T_NetControl;
+
+#define MIPSNET_IO_EXTENT 0x40 /* being generous */
+
+#define field_offset(field) ((int)&((MIPS_T_NetControl*)(0))->field)
+
+#endif /* __MIPSNET_H */
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index f0996ce5c26..6c86dca62e2 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -277,7 +277,7 @@ static void myri_init_rings(struct myri_eth *mp, int from_irq)
struct recvq __iomem *rq = mp->rq;
struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
struct net_device *dev = mp->dev;
- int gfp_flags = GFP_KERNEL;
+ gfp_t gfp_flags = GFP_KERNEL;
int i;
if (from_irq || in_interrupt())
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h
index 9391e55a5e9..47722f708a4 100644
--- a/drivers/net/myri_sbus.h
+++ b/drivers/net/myri_sbus.h
@@ -296,7 +296,7 @@ struct myri_eth {
/* We use this to acquire receive skb's that we can DMA directly into. */
#define ALIGNED_RX_SKB_ADDR(addr) \
((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
-static inline struct sk_buff *myri_alloc_skb(unsigned int length, int gfp_flags)
+static inline struct sk_buff *myri_alloc_skb(unsigned int length, gfp_t gfp_flags)
{
struct sk_buff *skb;
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index d209a1556b2..0de8fdd2aa8 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -54,6 +54,10 @@ static const char version2[] =
#include <asm/system.h>
#include <asm/io.h>
+#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+#include <asm/tx4938/rbtx4938.h>
+#endif
+
#include "8390.h"
#define DRV_NAME "ne"
@@ -111,6 +115,9 @@ bad_clone_list[] __initdata = {
{"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
{"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
{"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
+#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+ {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
+#endif
{"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
{NULL,}
};
@@ -226,6 +233,10 @@ struct net_device * __init ne_probe(int unit)
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
+#ifdef CONFIG_TOSHIBA_RBTX4938
+ dev->base_addr = 0x07f20280;
+ dev->irq = RBTX4938_RTL_8019_IRQ;
+#endif
err = do_ne_probe(dev);
if (err)
goto out;
@@ -506,6 +517,10 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
ei_status.name = name;
ei_status.tx_start_page = start_page;
ei_status.stop_page = stop_page;
+#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+ wordlength = 1;
+#endif
+
#ifdef CONFIG_PLAT_OAKS32R
ei_status.word16 = 0;
#else
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index f1c01ac2910..e531a4eedfe 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -372,6 +372,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":");
dev->dev_addr[i] = SA_prom[i];
}
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return 0;
@@ -637,6 +638,7 @@ static struct ethtool_ops ne2k_pci_ethtool_ops = {
.get_drvinfo = ne2k_pci_get_drvinfo,
.get_tx_csum = ethtool_op_get_tx_csum,
.get_sg = ethtool_op_get_sg,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index e4811b42a6b..a3c3fc9c0d8 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1632,8 +1632,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab
timed_out = 1;
break;
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
+ schedule_timeout_uninterruptible(1);
}
if (status & fail)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 113b6809921..70fe81a89df 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,8 +22,8 @@
*************************************************************************/
#define DRV_NAME "pcnet32"
-#define DRV_VERSION "1.30j"
-#define DRV_RELDATE "29.04.2005"
+#define DRV_VERSION "1.31a"
+#define DRV_RELDATE "12.Sep.2005"
#define PFX DRV_NAME ": "
static const char *version =
@@ -257,6 +257,9 @@ static int homepna[MAX_UNITS];
* v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
* v1.30i 28 Jun 2004 Don Fry change to use module_param.
* v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
+ * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
+ * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
+ * to allow loopback test to work unchanged.
*/
@@ -266,17 +269,17 @@ static int homepna[MAX_UNITS];
* That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
*/
#ifndef PCNET32_LOG_TX_BUFFERS
-#define PCNET32_LOG_TX_BUFFERS 4
-#define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
+#define PCNET32_LOG_MAX_RX_BUFFERS 9
#endif
#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
-#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
-#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12)
+#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
-#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4)
+#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
#define PKT_BUF_SZ 1544
@@ -334,14 +337,14 @@ struct pcnet32_access {
};
/*
- * The first three fields of pcnet32_private are read by the ethernet device
- * so we allocate the structure should be allocated by pci_alloc_consistent().
+ * The first field of pcnet32_private is read by the ethernet device
+ * so the structure should be allocated using pci_alloc_consistent().
*/
struct pcnet32_private {
- /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
- struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
- struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
struct pcnet32_init_block init_block;
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head *rx_ring;
+ struct pcnet32_tx_head *tx_ring;
dma_addr_t dma_addr; /* DMA address of beginning of this
object, returned by
pci_alloc_consistent */
@@ -349,13 +352,21 @@ struct pcnet32_private {
structure */
const char *name;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
- struct sk_buff *tx_skbuff[TX_RING_SIZE];
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
- dma_addr_t tx_dma_addr[TX_RING_SIZE];
- dma_addr_t rx_dma_addr[RX_RING_SIZE];
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *tx_dma_addr;
+ dma_addr_t *rx_dma_addr;
struct pcnet32_access a;
spinlock_t lock; /* Guard lock */
unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int rx_ring_size; /* current rx ring size */
+ unsigned int tx_ring_size; /* current tx ring size */
+ unsigned int rx_mod_mask; /* rx ring modular mask */
+ unsigned int tx_mod_mask; /* tx ring modular mask */
+ unsigned short rx_len_bits;
+ unsigned short tx_len_bits;
+ dma_addr_t rx_ring_dma_addr;
+ dma_addr_t tx_ring_dma_addr;
unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
struct net_device_stats stats;
char tx_full;
@@ -397,6 +408,9 @@ static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev);
+static void pcnet32_free_ring(struct net_device *dev);
+
enum pci_flags_bit {
PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
@@ -613,10 +627,62 @@ static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringpar
{
struct pcnet32_private *lp = dev->priv;
- ering->tx_max_pending = TX_RING_SIZE - 1;
- ering->tx_pending = lp->cur_tx - lp->dirty_tx;
- ering->rx_max_pending = RX_RING_SIZE - 1;
- ering->rx_pending = lp->cur_rx & RX_RING_MOD_MASK;
+ ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
+ ering->tx_pending = lp->tx_ring_size - 1;
+ ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
+ ering->rx_pending = lp->rx_ring_size - 1;
+}
+
+static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int i;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ pcnet32_close(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ pcnet32_free_ring(dev);
+ lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
+ lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
+
+ /* set the minimum ring size to 4, to allow the loopback test to work
+ * unchanged.
+ */
+ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+ if (lp->tx_ring_size <= (1 << i))
+ break;
+ }
+ lp->tx_ring_size = (1 << i);
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->tx_len_bits = (i << 12);
+
+ for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+ if (lp->rx_ring_size <= (1 << i))
+ break;
+ }
+ lp->rx_ring_size = (1 << i);
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->rx_len_bits = (i << 4);
+
+ if (pcnet32_alloc_ring(dev)) {
+ pcnet32_free_ring(dev);
+ return -ENOMEM;
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_INFO PFX "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size);
+
+ if (netif_running(dev))
+ pcnet32_open(dev);
+
+ return 0;
}
static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -948,6 +1014,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
.nway_reset = pcnet32_nway_reset,
.get_link = pcnet32_get_link,
.get_ringparam = pcnet32_get_ringparam,
+ .set_ringparam = pcnet32_set_ringparam,
.get_tx_csum = ethtool_op_get_tx_csum,
.get_sg = ethtool_op_get_sg,
.get_tso = ethtool_op_get_tso,
@@ -957,6 +1024,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
.phys_id = pcnet32_phys_id,
.get_regs_len = pcnet32_get_regs_len,
.get_regs = pcnet32_get_regs,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
/* only probes for non-PCI devices, the rest are handled by
@@ -1185,9 +1253,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
memcpy(dev->dev_addr, promaddr, 6);
}
}
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->dev_addr))
+ if (!is_valid_ether_addr(dev->perm_addr))
memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
if (pcnet32_debug & NETIF_MSG_PROBE) {
@@ -1239,6 +1308,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev->priv = lp;
lp->name = chipname;
lp->shared_irq = shared;
+ lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
+ lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+ lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
lp->mii_if.full_duplex = fdx;
lp->mii_if.phy_id_mask = 0x1f;
lp->mii_if.reg_num_mask = 0x1f;
@@ -1265,21 +1340,23 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
lp->a = *a;
+ if (pcnet32_alloc_ring(dev)) {
+ ret = -ENOMEM;
+ goto err_free_ring;
+ }
/* detect special T1/E1 WAN card by checking for MAC address */
if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
&& dev->dev_addr[2] == 0x75)
lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
- lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
for (i = 0; i < 6; i++)
lp->init_block.phys_addr[i] = dev->dev_addr[i];
lp->init_block.filter[0] = 0x00000000;
lp->init_block.filter[1] = 0x00000000;
- lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
- offsetof(struct pcnet32_private, rx_ring));
- lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
- offsetof(struct pcnet32_private, tx_ring));
+ lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
+ lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
/* switch pcnet32 to 32bit mode */
a->write_bcr(ioaddr, 20, 2);
@@ -1310,7 +1387,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (pcnet32_debug & NETIF_MSG_PROBE)
printk(", failed to detect IRQ line.\n");
ret = -ENODEV;
- goto err_free_consistent;
+ goto err_free_ring;
}
if (pcnet32_debug & NETIF_MSG_PROBE)
printk(", probed IRQ %d.\n", dev->irq);
@@ -1341,7 +1418,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* Fill in the generic fields of the device structure. */
if (register_netdev(dev))
- goto err_free_consistent;
+ goto err_free_ring;
if (pdev) {
pci_set_drvdata(pdev, dev);
@@ -1359,6 +1436,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
return 0;
+err_free_ring:
+ pcnet32_free_ring(dev);
err_free_consistent:
pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
err_free_netdev:
@@ -1369,6 +1448,86 @@ err_release_region:
}
+static int pcnet32_alloc_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+
+ if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+ &lp->tx_ring_dma_addr)) == NULL) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+ &lp->rx_ring_dma_addr)) == NULL) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ if (!(lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, GFP_ATOMIC))) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR PFX "Memory allocation failed.\n");
+ return -ENOMEM;
+ }
+ memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
+
+ if (!(lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, GFP_ATOMIC))) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR PFX "Memory allocation failed.\n");
+ return -ENOMEM;
+ }
+ memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
+
+ if (!(lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, GFP_ATOMIC))) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR PFX "Memory allocation failed.\n");
+ return -ENOMEM;
+ }
+ memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
+
+ if (!(lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, GFP_ATOMIC))) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR PFX "Memory allocation failed.\n");
+ return -ENOMEM;
+ }
+ memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
+
+ return 0;
+}
+
+
+static void pcnet32_free_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+
+ kfree(lp->tx_skbuff);
+ lp->tx_skbuff = NULL;
+
+ kfree(lp->rx_skbuff);
+ lp->rx_skbuff = NULL;
+
+ kfree(lp->tx_dma_addr);
+ lp->tx_dma_addr = NULL;
+
+ kfree(lp->rx_dma_addr);
+ lp->rx_dma_addr = NULL;
+
+ if (lp->tx_ring) {
+ pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+ lp->tx_ring = NULL;
+ }
+
+ if (lp->rx_ring) {
+ pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+ lp->rx_ring, lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
+}
+
+
static int
pcnet32_open(struct net_device *dev)
{
@@ -1400,8 +1559,8 @@ pcnet32_open(struct net_device *dev)
if (netif_msg_ifup(lp))
printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
dev->name, dev->irq,
- (u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)),
- (u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)),
+ (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
(u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
/* set/reset autoselect bit */
@@ -1521,7 +1680,7 @@ pcnet32_open(struct net_device *dev)
err_free_ring:
/* free any allocated skbuffs */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < lp->rx_ring_size; i++) {
lp->rx_ring[i].status = 0;
if (lp->rx_skbuff[i]) {
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
@@ -1531,6 +1690,9 @@ err_free_ring:
lp->rx_skbuff[i] = NULL;
lp->rx_dma_addr[i] = 0;
}
+
+ pcnet32_free_ring(dev);
+
/*
* Switch back to 16bit mode to avoid problems with dumb
* DOS packet driver after a warm reboot
@@ -1562,7 +1724,7 @@ pcnet32_purge_tx_ring(struct net_device *dev)
struct pcnet32_private *lp = dev->priv;
int i;
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < lp->tx_ring_size; i++) {
lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->tx_skbuff[i]) {
@@ -1587,7 +1749,7 @@ pcnet32_init_ring(struct net_device *dev)
lp->cur_rx = lp->cur_tx = 0;
lp->dirty_rx = lp->dirty_tx = 0;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
if (rx_skbuff == NULL) {
if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
@@ -1611,20 +1773,18 @@ pcnet32_init_ring(struct net_device *dev)
}
/* The Tx buffer address is filled in as needed, but we do need to clear
* the upper ownership bit. */
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < lp->tx_ring_size; i++) {
lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
lp->tx_ring[i].base = 0;
lp->tx_dma_addr[i] = 0;
}
- lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
for (i = 0; i < 6; i++)
lp->init_block.phys_addr[i] = dev->dev_addr[i];
- lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr +
- offsetof(struct pcnet32_private, rx_ring));
- lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
- offsetof(struct pcnet32_private, tx_ring));
+ lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
+ lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
wmb(); /* Make sure all changes are visible */
return 0;
}
@@ -1682,13 +1842,13 @@ pcnet32_tx_timeout (struct net_device *dev)
printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
lp->cur_rx);
- for (i = 0 ; i < RX_RING_SIZE; i++)
+ for (i = 0 ; i < lp->rx_ring_size; i++)
printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
le32_to_cpu(lp->rx_ring[i].base),
(-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
le32_to_cpu(lp->rx_ring[i].msg_length),
le16_to_cpu(lp->rx_ring[i].status));
- for (i = 0 ; i < TX_RING_SIZE; i++)
+ for (i = 0 ; i < lp->tx_ring_size; i++)
printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
le32_to_cpu(lp->tx_ring[i].base),
(-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
@@ -1729,7 +1889,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Fill in a Tx ring entry */
/* Mask to ring buffer boundary. */
- entry = lp->cur_tx & TX_RING_MOD_MASK;
+ entry = lp->cur_tx & lp->tx_mod_mask;
/* Caution: the write order is important here, set the status
* with the "ownership" bits last. */
@@ -1753,7 +1913,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
- if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base != 0) {
+ if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) {
lp->tx_full = 1;
netif_stop_queue(dev);
}
@@ -1806,7 +1966,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
int delta;
while (dirty_tx != lp->cur_tx) {
- int entry = dirty_tx & TX_RING_MOD_MASK;
+ int entry = dirty_tx & lp->tx_mod_mask;
int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
if (status < 0)
@@ -1864,18 +2024,18 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
dirty_tx++;
}
- delta = (lp->cur_tx - dirty_tx) & (TX_RING_MOD_MASK + TX_RING_SIZE);
- if (delta > TX_RING_SIZE) {
+ delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
+ if (delta > lp->tx_ring_size) {
if (netif_msg_drv(lp))
printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
- dirty_tx += TX_RING_SIZE;
- delta -= TX_RING_SIZE;
+ dirty_tx += lp->tx_ring_size;
+ delta -= lp->tx_ring_size;
}
if (lp->tx_full &&
netif_queue_stopped(dev) &&
- delta < TX_RING_SIZE - 2) {
+ delta < lp->tx_ring_size - 2) {
/* The ring is no longer full, clear tbusy. */
lp->tx_full = 0;
netif_wake_queue (dev);
@@ -1932,8 +2092,8 @@ static int
pcnet32_rx(struct net_device *dev)
{
struct pcnet32_private *lp = dev->priv;
- int entry = lp->cur_rx & RX_RING_MOD_MASK;
- int boguscnt = RX_RING_SIZE / 2;
+ int entry = lp->cur_rx & lp->rx_mod_mask;
+ int boguscnt = lp->rx_ring_size / 2;
/* If we own the next entry, it's a new packet. Send it up. */
while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
@@ -1998,12 +2158,12 @@ pcnet32_rx(struct net_device *dev)
if (netif_msg_drv(lp))
printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n",
dev->name);
- for (i = 0; i < RX_RING_SIZE; i++)
+ for (i = 0; i < lp->rx_ring_size; i++)
if ((short)le16_to_cpu(lp->rx_ring[(entry+i)
- & RX_RING_MOD_MASK].status) < 0)
+ & lp->rx_mod_mask].status) < 0)
break;
- if (i > RX_RING_SIZE -2) {
+ if (i > lp->rx_ring_size -2) {
lp->stats.rx_dropped++;
lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
wmb(); /* Make sure adapter sees owner change */
@@ -2041,7 +2201,7 @@ pcnet32_rx(struct net_device *dev)
lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
wmb(); /* Make sure owner changes after all others are visible */
lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
- entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ entry = (++lp->cur_rx) & lp->rx_mod_mask;
if (--boguscnt <= 0) break; /* don't stay in loop forever */
}
@@ -2084,7 +2244,7 @@ pcnet32_close(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
/* free all allocated skbuffs */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < lp->rx_ring_size; i++) {
lp->rx_ring[i].status = 0;
wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) {
@@ -2096,7 +2256,7 @@ pcnet32_close(struct net_device *dev)
lp->rx_dma_addr[i] = 0;
}
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < lp->tx_ring_size; i++) {
lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->tx_skbuff[i]) {
@@ -2265,6 +2425,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
struct pcnet32_private *lp = dev->priv;
unregister_netdev(dev);
+ pcnet32_free_ring(dev);
release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
free_netdev(dev);
@@ -2340,6 +2501,7 @@ static void __exit pcnet32_cleanup_module(void)
struct pcnet32_private *lp = pcnet32_dev->priv;
next_dev = lp->next;
unregister_netdev(pcnet32_dev);
+ pcnet32_free_ring(pcnet32_dev);
release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
free_netdev(pcnet32_dev);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 14f4de1a818..c782a632980 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -12,14 +12,6 @@ config PHYLIB
devices. This option provides infrastructure for
managing PHY devices.
-config PHYCONTROL
- bool " Support for automatically handling PHY state changes"
- depends on PHYLIB
- help
- Adds code to perform all the work for keeping PHY link
- state (speed/duplex/etc) up-to-date. Also handles
- interrupts.
-
comment "MII PHY device drivers"
depends on PHYLIB
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d9e11f93bf3..9209da9dde0 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -242,10 +242,6 @@ EXPORT_SYMBOL(phy_sanitize_settings);
* choose the next best ones from the ones selected, so we don't
* care if ethtool tries to give us bad values
*
- * A note about the PHYCONTROL Layer. If you turn off
- * CONFIG_PHYCONTROL, you will need to read the PHY status
- * registers after this function completes, and update your
- * controller manually.
*/
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
@@ -380,7 +376,6 @@ int phy_start_aneg(struct phy_device *phydev)
err = phydev->drv->config_aneg(phydev);
-#ifdef CONFIG_PHYCONTROL
if (err < 0)
goto out_unlock;
@@ -395,14 +390,12 @@ int phy_start_aneg(struct phy_device *phydev)
}
out_unlock:
-#endif
spin_unlock(&phydev->lock);
return err;
}
EXPORT_SYMBOL(phy_start_aneg);
-#ifdef CONFIG_PHYCONTROL
static void phy_change(void *data);
static void phy_timer(unsigned long data);
@@ -868,4 +861,3 @@ static void phy_timer(unsigned long data)
mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}
-#endif /* CONFIG_PHYCONTROL */
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 33f7bdb5857..6da1aa0706a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -101,7 +101,6 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
return dev;
}
-#ifdef CONFIG_PHYCONTROL
/* phy_prepare_link:
*
* description: Tells the PHY infrastructure to handle the
@@ -160,8 +159,6 @@ void phy_disconnect(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_disconnect);
-#endif /* CONFIG_PHYCONTROL */
-
/* phy_attach:
*
* description: Called by drivers to attach to a particular PHY
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index afb3f186b88..159b56a56ef 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1027,6 +1027,7 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
.get_strings = rtl8169_get_strings,
.get_stats_count = rtl8169_get_stats_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
@@ -1511,6 +1512,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Get MAC address. FIXME: read EEPROM */
for (i = 0; i < MAC_ADDR_LEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->open = rtl8169_open;
dev->hard_start_xmit = rtl8169_start_xmit;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
new file mode 100644
index 00000000000..12cde060458
--- /dev/null
+++ b/drivers/net/rionet.c
@@ -0,0 +1,574 @@
+/*
+ * rionet - Ethernet driver over RapidIO messaging services
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+
+#define DRV_NAME "rionet"
+#define DRV_VERSION "0.2"
+#define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
+#define DRV_DESC "Ethernet over RapidIO"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+#define RIONET_DEFAULT_MSGLEVEL \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+#define RIONET_DOORBELL_JOIN 0x1000
+#define RIONET_DOORBELL_LEAVE 0x1001
+
+#define RIONET_MAILBOX 0
+
+#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
+#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
+
+static LIST_HEAD(rionet_peers);
+
+struct rionet_private {
+ struct rio_mport *mport;
+ struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
+ struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
+ struct net_device_stats stats;
+ int rx_slot;
+ int tx_slot;
+ int tx_cnt;
+ int ack_slot;
+ spinlock_t lock;
+ spinlock_t tx_lock;
+ u32 msg_enable;
+};
+
+struct rionet_peer {
+ struct list_head node;
+ struct rio_dev *rdev;
+ struct resource *res;
+};
+
+static int rionet_check = 0;
+static int rionet_capable = 1;
+
+/*
+ * This is a fast lookup table for translating TX
+ * Ethernet packets into a destination RIO device. It
+ * could be made into a hash table to save memory depending
+ * on system trade-offs.
+ */
+static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES];
+
+#define is_rionet_capable(pef, src_ops, dst_ops) \
+ ((pef & RIO_PEF_INB_MBOX) && \
+ (pef & RIO_PEF_INB_DOORBELL) && \
+ (src_ops & RIO_SRC_OPS_DOORBELL) && \
+ (dst_ops & RIO_DST_OPS_DOORBELL))
+#define dev_rionet_capable(dev) \
+ is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
+
+#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
+#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
+
+static struct net_device_stats *rionet_stats(struct net_device *ndev)
+{
+ struct rionet_private *rnet = ndev->priv;
+ return &rnet->stats;
+}
+
+static int rionet_rx_clean(struct net_device *ndev)
+{
+ int i;
+ int error = 0;
+ struct rionet_private *rnet = ndev->priv;
+ void *data;
+
+ i = rnet->rx_slot;
+
+ do {
+ if (!rnet->rx_skb[i])
+ continue;
+
+ if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
+ break;
+
+ rnet->rx_skb[i]->data = data;
+ skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
+ rnet->rx_skb[i]->dev = ndev;
+ rnet->rx_skb[i]->protocol =
+ eth_type_trans(rnet->rx_skb[i], ndev);
+ error = netif_rx(rnet->rx_skb[i]);
+
+ if (error == NET_RX_DROP) {
+ rnet->stats.rx_dropped++;
+ } else if (error == NET_RX_BAD) {
+ if (netif_msg_rx_err(rnet))
+ printk(KERN_WARNING "%s: bad rx packet\n",
+ DRV_NAME);
+ rnet->stats.rx_errors++;
+ } else {
+ rnet->stats.rx_packets++;
+ rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE;
+ }
+
+ } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);
+
+ return i;
+}
+
+static void rionet_rx_fill(struct net_device *ndev, int end)
+{
+ int i;
+ struct rionet_private *rnet = ndev->priv;
+
+ i = rnet->rx_slot;
+ do {
+ rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
+
+ if (!rnet->rx_skb[i])
+ break;
+
+ rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
+ rnet->rx_skb[i]->data);
+ } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);
+
+ rnet->rx_slot = i;
+}
+
+static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
+ struct rio_dev *rdev)
+{
+ struct rionet_private *rnet = ndev->priv;
+
+ rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
+ rnet->tx_skb[rnet->tx_slot] = skb;
+
+ rnet->stats.tx_packets++;
+ rnet->stats.tx_bytes += skb->len;
+
+ if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
+ netif_stop_queue(ndev);
+
+ ++rnet->tx_slot;
+ rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
+
+ if (netif_msg_tx_queued(rnet))
+ printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
+ (u32) skb, skb->len);
+
+ return 0;
+}
+
+static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int i;
+ struct rionet_private *rnet = ndev->priv;
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ u16 destid;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (!spin_trylock(&rnet->tx_lock)) {
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+
+ if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&rnet->tx_lock, flags);
+ printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+ ndev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (eth->h_dest[0] & 0x01) {
+ for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++)
+ if (rionet_active[i])
+ rionet_queue_tx_msg(skb, ndev,
+ rionet_active[i]);
+ } else if (RIONET_MAC_MATCH(eth->h_dest)) {
+ destid = RIONET_GET_DESTID(eth->h_dest);
+ if (rionet_active[destid])
+ rionet_queue_tx_msg(skb, ndev, rionet_active[destid]);
+ }
+
+ spin_unlock_irqrestore(&rnet->tx_lock, flags);
+
+ return 0;
+}
+
+static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
+ u16 info)
+{
+ struct net_device *ndev = dev_id;
+ struct rionet_private *rnet = ndev->priv;
+ struct rionet_peer *peer;
+
+ if (netif_msg_intr(rnet))
+ printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
+ DRV_NAME, sid, tid, info);
+ if (info == RIONET_DOORBELL_JOIN) {
+ if (!rionet_active[sid]) {
+ list_for_each_entry(peer, &rionet_peers, node) {
+ if (peer->rdev->destid == sid)
+ rionet_active[sid] = peer->rdev;
+ }
+ rio_mport_send_doorbell(mport, sid,
+ RIONET_DOORBELL_JOIN);
+ }
+ } else if (info == RIONET_DOORBELL_LEAVE) {
+ rionet_active[sid] = NULL;
+ } else {
+ if (netif_msg_intr(rnet))
+ printk(KERN_WARNING "%s: unhandled doorbell\n",
+ DRV_NAME);
+ }
+}
+
+static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
+{
+ int n;
+ struct net_device *ndev = dev_id;
+ struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
+
+ if (netif_msg_intr(rnet))
+ printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
+ DRV_NAME, mbox, slot);
+
+ spin_lock(&rnet->lock);
+ if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
+ rionet_rx_fill(ndev, n);
+ spin_unlock(&rnet->lock);
+}
+
+static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
+{
+ struct net_device *ndev = dev_id;
+ struct rionet_private *rnet = ndev->priv;
+
+ spin_lock(&rnet->lock);
+
+ if (netif_msg_intr(rnet))
+ printk(KERN_INFO
+ "%s: outbound message event, mbox %d slot %d\n",
+ DRV_NAME, mbox, slot);
+
+ while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
+ /* dma unmap single */
+ dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
+ rnet->tx_skb[rnet->ack_slot] = NULL;
+ ++rnet->ack_slot;
+ rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
+ rnet->tx_cnt--;
+ }
+
+ if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
+ netif_wake_queue(ndev);
+
+ spin_unlock(&rnet->lock);
+}
+
+static int rionet_open(struct net_device *ndev)
+{
+ int i, rc = 0;
+ struct rionet_peer *peer, *tmp;
+ u32 pwdcsr;
+ struct rionet_private *rnet = ndev->priv;
+
+ if (netif_msg_ifup(rnet))
+ printk(KERN_INFO "%s: open\n", DRV_NAME);
+
+ if ((rc = rio_request_inb_dbell(rnet->mport,
+ (void *)ndev,
+ RIONET_DOORBELL_JOIN,
+ RIONET_DOORBELL_LEAVE,
+ rionet_dbell_event)) < 0)
+ goto out;
+
+ if ((rc = rio_request_inb_mbox(rnet->mport,
+ (void *)ndev,
+ RIONET_MAILBOX,
+ RIONET_RX_RING_SIZE,
+ rionet_inb_msg_event)) < 0)
+ goto out;
+
+ if ((rc = rio_request_outb_mbox(rnet->mport,
+ (void *)ndev,
+ RIONET_MAILBOX,
+ RIONET_TX_RING_SIZE,
+ rionet_outb_msg_event)) < 0)
+ goto out;
+
+ /* Initialize inbound message ring */
+ for (i = 0; i < RIONET_RX_RING_SIZE; i++)
+ rnet->rx_skb[i] = NULL;
+ rnet->rx_slot = 0;
+ rionet_rx_fill(ndev, 0);
+
+ rnet->tx_slot = 0;
+ rnet->tx_cnt = 0;
+ rnet->ack_slot = 0;
+
+ netif_carrier_on(ndev);
+ netif_start_queue(ndev);
+
+ list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+ if (!(peer->res = rio_request_outb_dbell(peer->rdev,
+ RIONET_DOORBELL_JOIN,
+ RIONET_DOORBELL_LEAVE)))
+ {
+ printk(KERN_ERR "%s: error requesting doorbells\n",
+ DRV_NAME);
+ continue;
+ }
+
+ /*
+ * If device has initialized inbound doorbells,
+ * send a join message
+ */
+ rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
+ if (pwdcsr & RIO_DOORBELL_AVAIL)
+ rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
+ }
+
+ out:
+ return rc;
+}
+
+static int rionet_close(struct net_device *ndev)
+{
+ struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
+ struct rionet_peer *peer, *tmp;
+ int i;
+
+ if (netif_msg_ifdown(rnet))
+ printk(KERN_INFO "%s: close\n", DRV_NAME);
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ for (i = 0; i < RIONET_RX_RING_SIZE; i++)
+ if (rnet->rx_skb[i])
+ kfree_skb(rnet->rx_skb[i]);
+
+ list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+ if (rionet_active[peer->rdev->destid]) {
+ rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
+ rionet_active[peer->rdev->destid] = NULL;
+ }
+ rio_release_outb_dbell(peer->rdev, peer->res);
+ }
+
+ rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
+ RIONET_DOORBELL_LEAVE);
+ rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
+ rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
+
+ return 0;
+}
+
+static void rionet_remove(struct rio_dev *rdev)
+{
+ struct net_device *ndev = NULL;
+ struct rionet_peer *peer, *tmp;
+
+ unregister_netdev(ndev);
+ kfree(ndev);
+
+ list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
+ list_del(&peer->node);
+ kfree(peer);
+ }
+}
+
+static void rionet_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct rionet_private *rnet = ndev->priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "n/a");
+ strcpy(info->bus_info, rnet->mport->name);
+}
+
+static u32 rionet_get_msglevel(struct net_device *ndev)
+{
+ struct rionet_private *rnet = ndev->priv;
+
+ return rnet->msg_enable;
+}
+
+static void rionet_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct rionet_private *rnet = ndev->priv;
+
+ rnet->msg_enable = value;
+}
+
+static struct ethtool_ops rionet_ethtool_ops = {
+ .get_drvinfo = rionet_get_drvinfo,
+ .get_msglevel = rionet_get_msglevel,
+ .set_msglevel = rionet_set_msglevel,
+ .get_link = ethtool_op_get_link,
+};
+
+static int rionet_setup_netdev(struct rio_mport *mport)
+{
+ int rc = 0;
+ struct net_device *ndev = NULL;
+ struct rionet_private *rnet;
+ u16 device_id;
+
+ /* Allocate our net_device structure */
+ ndev = alloc_etherdev(sizeof(struct rionet_private));
+ if (ndev == NULL) {
+ printk(KERN_INFO "%s: could not allocate ethernet device.\n",
+ DRV_NAME);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Set up private area */
+ rnet = (struct rionet_private *)ndev->priv;
+ rnet->mport = mport;
+
+ /* Set the default MAC address */
+ device_id = rio_local_get_device_id(mport);
+ ndev->dev_addr[0] = 0x00;
+ ndev->dev_addr[1] = 0x01;
+ ndev->dev_addr[2] = 0x00;
+ ndev->dev_addr[3] = 0x01;
+ ndev->dev_addr[4] = device_id >> 8;
+ ndev->dev_addr[5] = device_id & 0xff;
+
+ /* Fill in the driver function table */
+ ndev->open = &rionet_open;
+ ndev->hard_start_xmit = &rionet_start_xmit;
+ ndev->stop = &rionet_close;
+ ndev->get_stats = &rionet_stats;
+ ndev->mtu = RIO_MAX_MSG_SIZE - 14;
+ ndev->features = NETIF_F_LLTX;
+ SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
+
+ SET_MODULE_OWNER(ndev);
+
+ spin_lock_init(&rnet->lock);
+ spin_lock_init(&rnet->tx_lock);
+
+ rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
+
+ rc = register_netdev(ndev);
+ if (rc != 0)
+ goto out;
+
+ printk("%s: %s %s Version %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ndev->name,
+ DRV_NAME,
+ DRV_DESC,
+ DRV_VERSION,
+ ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
+ ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
+
+ out:
+ return rc;
+}
+
+/*
+ * XXX Make multi-net safe
+ */
+static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+{
+ int rc = -ENODEV;
+ u32 lpef, lsrc_ops, ldst_ops;
+ struct rionet_peer *peer;
+
+ /* If local device is not rionet capable, give up quickly */
+ if (!rionet_capable)
+ goto out;
+
+ /*
+ * First time through, make sure local device is rionet
+ * capable, setup netdev, and set flags so this is skipped
+ * on later probes
+ */
+ if (!rionet_check) {
+ rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
+ rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
+ &lsrc_ops);
+ rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
+ &ldst_ops);
+ if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
+ printk(KERN_ERR
+ "%s: local device is not network capable\n",
+ DRV_NAME);
+ rionet_check = 1;
+ rionet_capable = 0;
+ goto out;
+ }
+
+ rc = rionet_setup_netdev(rdev->net->hport);
+ rionet_check = 1;
+ }
+
+ /*
+ * If the remote device has mailbox/doorbell capabilities,
+ * add it to the peer list.
+ */
+ if (dev_rionet_capable(rdev)) {
+ if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ peer->rdev = rdev;
+ list_add_tail(&peer->node, &rionet_peers);
+ }
+
+ out:
+ return rc;
+}
+
+static struct rio_device_id rionet_id_table[] = {
+ {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}
+};
+
+static struct rio_driver rionet_driver = {
+ .name = "rionet",
+ .id_table = rionet_id_table,
+ .probe = rionet_probe,
+ .remove = rionet_remove,
+};
+
+static int __init rionet_init(void)
+{
+ return rio_register_driver(&rionet_driver);
+}
+
+static void __exit rionet_exit(void)
+{
+ rio_unregister_driver(&rionet_driver);
+}
+
+module_init(rionet_init);
+module_exit(rionet_exit);
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 7cefe5507b9..00179bc3437 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -814,6 +814,17 @@ typedef struct _XENA_dev_config {
u64 rxgxs_ber_0; /* CHANGED */
u64 rxgxs_ber_1; /* CHANGED */
+ u64 spi_control;
+#define SPI_CONTROL_KEY(key) vBIT(key,0,4)
+#define SPI_CONTROL_BYTECNT(cnt) vBIT(cnt,29,3)
+#define SPI_CONTROL_CMD(cmd) vBIT(cmd,32,8)
+#define SPI_CONTROL_ADDR(addr) vBIT(addr,40,24)
+#define SPI_CONTROL_SEL1 BIT(4)
+#define SPI_CONTROL_REQ BIT(7)
+#define SPI_CONTROL_NACK BIT(5)
+#define SPI_CONTROL_DONE BIT(6)
+ u64 spi_data;
+#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
} XENA_dev_config_t;
#define XENA_REG_SPACE sizeof(XENA_dev_config_t)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index dd451e099a4..d303d162974 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -65,9 +65,11 @@
#include "s2io.h"
#include "s2io-regs.h"
+#define DRV_VERSION "Version 2.0.9.1"
+
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
-static char s2io_driver_version[] = "Version 2.0.8.1";
+static char s2io_driver_version[] = DRV_VERSION;
static inline int RXD_IS_UP2DT(RxD_t *rxdp)
{
@@ -307,6 +309,8 @@ static unsigned int indicate_max_pkts;
#endif
/* Frequency of Rx desc syncs expressed as power of 2 */
static unsigned int rxsync_frequency = 3;
+/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
+static unsigned int intr_type = 0;
/*
* S2IO device table.
@@ -1396,8 +1400,13 @@ static int init_nic(struct s2io_nic *nic)
writeq(val64, &bar0->rti_data1_mem);
val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
- RTI_DATA2_MEM_RX_UFC_B(0x2) |
- RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
+ RTI_DATA2_MEM_RX_UFC_B(0x2) ;
+ if (nic->intr_type == MSI_X)
+ val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
+ RTI_DATA2_MEM_RX_UFC_D(0x40));
+ else
+ val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
+ RTI_DATA2_MEM_RX_UFC_D(0x80));
writeq(val64, &bar0->rti_data2_mem);
for (i = 0; i < config->rx_ring_num; i++) {
@@ -1507,17 +1516,15 @@ static int init_nic(struct s2io_nic *nic)
#define LINK_UP_DOWN_INTERRUPT 1
#define MAC_RMAC_ERR_TIMER 2
-#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
-#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
-#else
int s2io_link_fault_indication(nic_t *nic)
{
+ if (nic->intr_type != INTA)
+ return MAC_RMAC_ERR_TIMER;
if (nic->device_type == XFRAME_II_DEVICE)
return LINK_UP_DOWN_INTERRUPT;
else
return MAC_RMAC_ERR_TIMER;
}
-#endif
/**
* en_dis_able_nic_intrs - Enable or Disable the interrupts
@@ -1941,11 +1948,14 @@ static int start_nic(struct s2io_nic *nic)
}
/* Enable select interrupts */
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
- interruptible |= TX_PIC_INTR | RX_PIC_INTR;
- interruptible |= TX_MAC_INTR | RX_MAC_INTR;
-
- en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
+ if (nic->intr_type != INTA)
+ en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
+ else {
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+ interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+ interruptible |= TX_MAC_INTR | RX_MAC_INTR;
+ en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
+ }
/*
* With some switches, link might be already up at this point.
@@ -2633,11 +2643,11 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
err = txdlp->Control_1 & TXD_T_CODE;
if ((err >> 48) == 0xA) {
DBG_PRINT(TX_DBG, "TxD returned due \
- to loss of link\n");
+to loss of link\n");
}
else {
DBG_PRINT(ERR_DBG, "***TxD error \
- %llx\n", err);
+%llx\n", err);
}
}
@@ -2854,6 +2864,9 @@ void s2io_reset(nic_t * sp)
/* Set swapper to enable I/O register access */
s2io_set_swapper(sp);
+ /* Restore the MSIX table entries from local variables */
+ restore_xmsi_data(sp);
+
/* Clear certain PCI/PCI-X fields after reset */
if (sp->device_type == XFRAME_II_DEVICE) {
/* Clear parity err detect bit */
@@ -2983,8 +2996,9 @@ int s2io_set_swapper(nic_t * sp)
SWAPPER_CTRL_RXD_W_FE |
SWAPPER_CTRL_RXF_W_FE |
SWAPPER_CTRL_XMSI_FE |
- SWAPPER_CTRL_XMSI_SE |
SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
+ if (sp->intr_type == INTA)
+ val64 |= SWAPPER_CTRL_XMSI_SE;
writeq(val64, &bar0->swapper_ctrl);
#else
/*
@@ -3005,8 +3019,9 @@ int s2io_set_swapper(nic_t * sp)
SWAPPER_CTRL_RXD_W_SE |
SWAPPER_CTRL_RXF_W_FE |
SWAPPER_CTRL_XMSI_FE |
- SWAPPER_CTRL_XMSI_SE |
SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
+ if (sp->intr_type == INTA)
+ val64 |= SWAPPER_CTRL_XMSI_SE;
writeq(val64, &bar0->swapper_ctrl);
#endif
val64 = readq(&bar0->swapper_ctrl);
@@ -3028,6 +3043,201 @@ int s2io_set_swapper(nic_t * sp)
return SUCCESS;
}
+int wait_for_msix_trans(nic_t *nic, int i)
+{
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ u64 val64;
+ int ret = 0, cnt = 0;
+
+ do {
+ val64 = readq(&bar0->xmsi_access);
+ if (!(val64 & BIT(15)))
+ break;
+ mdelay(1);
+ cnt++;
+ } while(cnt < 5);
+ if (cnt == 5) {
+ DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+void restore_xmsi_data(nic_t *nic)
+{
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ u64 val64;
+ int i;
+
+ for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+ writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
+ writeq(nic->msix_info[i].data, &bar0->xmsi_data);
+ val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
+ writeq(val64, &bar0->xmsi_access);
+ if (wait_for_msix_trans(nic, i)) {
+ DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
+ continue;
+ }
+ }
+}
+
+void store_xmsi_data(nic_t *nic)
+{
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ u64 val64, addr, data;
+ int i;
+
+ /* Store and display */
+ for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+ val64 = (BIT(15) | vBIT(i, 26, 6));
+ writeq(val64, &bar0->xmsi_access);
+ if (wait_for_msix_trans(nic, i)) {
+ DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
+ continue;
+ }
+ addr = readq(&bar0->xmsi_address);
+ data = readq(&bar0->xmsi_data);
+ if (addr && data) {
+ nic->msix_info[i].addr = addr;
+ nic->msix_info[i].data = data;
+ }
+ }
+}
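
The two helpers above form a save/restore pair: store_xmsi_data() snapshots each vector's xmsi_address/xmsi_data pair into nic->msix_info[], and restore_xmsi_data() replays that snapshot because a full chip reset clears the on-chip MSI-X table. A minimal sketch of the ordering this patch establishes (the wrapper function below is illustrative only, not part of the driver):

	/* Sketch: ordering of the MSI-X save/restore added by this patch. */
	static void msix_reset_sketch(nic_t *sp)
	{
		store_xmsi_data(sp);	/* done in s2io_open() once the vectors exist  */
		s2io_reset(sp);		/* a reset wipes the on-chip MSI-X table;      */
					/* s2io_reset() now calls restore_xmsi_data()  */
					/* right after register access is re-enabled   */
	}
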
+
+int s2io_enable_msi(nic_t *nic)
+{
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ u16 msi_ctrl, msg_val;
+ struct config_param *config = &nic->config;
+ struct net_device *dev = nic->dev;
+ u64 val64, tx_mat, rx_mat;
+ int i, err;
+
+ val64 = readq(&bar0->pic_control);
+ val64 &= ~BIT(1);
+ writeq(val64, &bar0->pic_control);
+
+ err = pci_enable_msi(nic->pdev);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
+ nic->dev->name);
+ return err;
+ }
+
+ /*
+	 * Enable MSI and use MSI-1 instead of the standard MSI-0
+ * for interrupt handling.
+ */
+ pci_read_config_word(nic->pdev, 0x4c, &msg_val);
+ msg_val ^= 0x1;
+ pci_write_config_word(nic->pdev, 0x4c, msg_val);
+ pci_read_config_word(nic->pdev, 0x4c, &msg_val);
+
+ pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
+ msi_ctrl |= 0x10;
+ pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
+
+ /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
+ tx_mat = readq(&bar0->tx_mat0_n[0]);
+ for (i=0; i<config->tx_fifo_num; i++) {
+ tx_mat |= TX_MAT_SET(i, 1);
+ }
+ writeq(tx_mat, &bar0->tx_mat0_n[0]);
+
+ rx_mat = readq(&bar0->rx_mat);
+ for (i=0; i<config->rx_ring_num; i++) {
+ rx_mat |= RX_MAT_SET(i, 1);
+ }
+ writeq(rx_mat, &bar0->rx_mat);
+
+ dev->irq = nic->pdev->irq;
+ return 0;
+}
+
+int s2io_enable_msi_x(nic_t *nic)
+{
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ u64 tx_mat, rx_mat;
+ u16 msi_control; /* Temp variable */
+ int ret, i, j, msix_indx = 1;
+
+ nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (nic->entries == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+ memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+
+ nic->s2io_entries =
+ kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
+ GFP_KERNEL);
+ if (nic->s2io_entries == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
+ kfree(nic->entries);
+ return -ENOMEM;
+ }
+ memset(nic->s2io_entries, 0,
+ MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+
+ for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+ nic->entries[i].entry = i;
+ nic->s2io_entries[i].entry = i;
+ nic->s2io_entries[i].arg = NULL;
+ nic->s2io_entries[i].in_use = 0;
+ }
+
+ tx_mat = readq(&bar0->tx_mat0_n[0]);
+ for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
+ tx_mat |= TX_MAT_SET(i, msix_indx);
+ nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
+ nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
+ nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+ }
+ writeq(tx_mat, &bar0->tx_mat0_n[0]);
+
+ if (!nic->config.bimodal) {
+ rx_mat = readq(&bar0->rx_mat);
+ for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
+ rx_mat |= RX_MAT_SET(j, msix_indx);
+ nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
+ nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
+ nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+ }
+ writeq(rx_mat, &bar0->rx_mat);
+ } else {
+ tx_mat = readq(&bar0->tx_mat0_n[7]);
+ for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
+ tx_mat |= TX_MAT_SET(i, msix_indx);
+ nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
+ nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
+ nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+ }
+ writeq(tx_mat, &bar0->tx_mat0_n[7]);
+ }
+
+ ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
+ kfree(nic->entries);
+ kfree(nic->s2io_entries);
+ nic->entries = NULL;
+ nic->s2io_entries = NULL;
+ return -ENOMEM;
+ }
+
+ /*
+ * To enable MSI-X, MSI also needs to be enabled, due to a bug
+ * in the herc NIC. (Temp change, needs to be removed later)
+ */
+ pci_read_config_word(nic->pdev, 0x42, &msi_control);
+ msi_control |= 0x1; /* Enable MSI */
+ pci_write_config_word(nic->pdev, 0x42, msi_control);
+
+ return 0;
+}
+
/* ********************************************************* *
* Functions defined below concern the OS part of the driver *
* ********************************************************* */
@@ -3048,6 +3258,8 @@ int s2io_open(struct net_device *dev)
{
nic_t *sp = dev->priv;
int err = 0;
+ int i;
+ u16 msi_control; /* Temp variable */
/*
* Make sure you have link off by default every time
@@ -3064,13 +3276,55 @@ int s2io_open(struct net_device *dev)
goto hw_init_failed;
}
+ /* Store the values of the MSIX table in the nic_t structure */
+ store_xmsi_data(sp);
+
/* After proper initialization of H/W, register ISR */
- err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
- sp->name, dev);
- if (err) {
- DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
- dev->name);
- goto isr_registration_failed;
+ if (sp->intr_type == MSI) {
+ err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
+ SA_SHIRQ, sp->name, dev);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: MSI registration \
+failed\n", dev->name);
+ goto isr_registration_failed;
+ }
+ }
+ if (sp->intr_type == MSI_X) {
+ for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
+ if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
+ sprintf(sp->desc1, "%s:MSI-X-%d-TX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_fifo_handle, 0, sp->desc1,
+ sp->s2io_entries[i].arg);
+ DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
+ sp->msix_info[i].addr);
+ } else {
+ sprintf(sp->desc2, "%s:MSI-X-%d-RX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_ring_handle, 0, sp->desc2,
+ sp->s2io_entries[i].arg);
+ DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
+ sp->msix_info[i].addr);
+ }
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
+failed\n", dev->name, i);
+ DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
+ goto isr_registration_failed;
+ }
+ sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
+ }
+ }
+ if (sp->intr_type == INTA) {
+ err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
+ sp->name, dev);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
+ dev->name);
+ goto isr_registration_failed;
+ }
}
if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
@@ -3083,11 +3337,37 @@ int s2io_open(struct net_device *dev)
return 0;
setting_mac_address_failed:
- free_irq(sp->pdev->irq, dev);
+ if (sp->intr_type != MSI_X)
+ free_irq(sp->pdev->irq, dev);
isr_registration_failed:
del_timer_sync(&sp->alarm_timer);
+ if (sp->intr_type == MSI_X) {
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ for (i=1; (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS); i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+
+ free_irq(vector, arg);
+ }
+ pci_disable_msix(sp->pdev);
+
+ /* Temp */
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+ }
+ }
+ else if (sp->intr_type == MSI)
+ pci_disable_msi(sp->pdev);
s2io_reset(sp);
hw_init_failed:
+ if (sp->intr_type == MSI_X) {
+ if (sp->entries)
+ kfree(sp->entries);
+ if (sp->s2io_entries)
+ kfree(sp->s2io_entries);
+ }
return err;
}
@@ -3107,12 +3387,35 @@ hw_init_failed:
int s2io_close(struct net_device *dev)
{
nic_t *sp = dev->priv;
+ int i;
+ u16 msi_control;
+
flush_scheduled_work();
netif_stop_queue(dev);
/* Reset card, kill tasklet and free Tx and Rx buffers. */
s2io_card_down(sp);
- free_irq(sp->pdev->irq, dev);
+ if (sp->intr_type == MSI_X) {
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ for (i=1; (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS); i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+
+ free_irq(vector, arg);
+ }
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+ pci_disable_msix(sp->pdev);
+ }
+ }
+ else {
+ free_irq(sp->pdev->irq, dev);
+ if (sp->intr_type == MSI)
+ pci_disable_msi(sp->pdev);
+ }
sp->device_close_flag = TRUE; /* Device is shut down. */
return 0;
}
@@ -3278,6 +3581,104 @@ s2io_alarm_handle(unsigned long data)
mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
+static irqreturn_t
+s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ nic_t *sp = dev->priv;
+ int i;
+ int ret;
+ mac_info_t *mac_control;
+ struct config_param *config;
+
+ atomic_inc(&sp->isr_cnt);
+ mac_control = &sp->mac_control;
+ config = &sp->config;
+ DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
+
+ /* If Intr is because of Rx Traffic */
+ for (i = 0; i < config->rx_ring_num; i++)
+ rx_intr_handler(&mac_control->rings[i]);
+
+ /* If Intr is because of Tx Traffic */
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&mac_control->fifos[i]);
+
+ /*
+ * If the Rx buffer count is below the panic threshold then
+ * reallocate the buffers from the interrupt handler itself,
+ * else schedule a tasklet to reallocate the buffers.
+ */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
+ int level = rx_buffer_level(sp, rxb_size, i);
+
+ if ((level == PANIC) && (!TASKLET_IN_USE)) {
+ DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
+ DBG_PRINT(INTR_DBG, "PANIC levels\n");
+ if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "%s:Out of memory",
+ dev->name);
+ DBG_PRINT(ERR_DBG, " in ISR!!\n");
+ clear_bit(0, (&sp->tasklet_status));
+ atomic_dec(&sp->isr_cnt);
+ return IRQ_HANDLED;
+ }
+ clear_bit(0, (&sp->tasklet_status));
+ } else if (level == LOW) {
+ tasklet_schedule(&sp->task);
+ }
+ }
+
+ atomic_dec(&sp->isr_cnt);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+ ring_info_t *ring = (ring_info_t *)dev_id;
+ nic_t *sp = ring->nic;
+ int rxb_size, level, rng_n;
+
+ atomic_inc(&sp->isr_cnt);
+ rx_intr_handler(ring);
+
+ rng_n = ring->ring_no;
+ rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
+ level = rx_buffer_level(sp, rxb_size, rng_n);
+
+ if ((level == PANIC) && (!TASKLET_IN_USE)) {
+ int ret;
+ DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
+ DBG_PRINT(INTR_DBG, "PANIC levels\n");
+ if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "Out of memory in %s",
+ __FUNCTION__);
+ clear_bit(0, (&sp->tasklet_status));
+ return IRQ_HANDLED;
+ }
+ clear_bit(0, (&sp->tasklet_status));
+ } else if (level == LOW) {
+ tasklet_schedule(&sp->task);
+ }
+ atomic_dec(&sp->isr_cnt);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+ fifo_info_t *fifo = (fifo_info_t *)dev_id;
+ nic_t *sp = fifo->nic;
+
+ atomic_inc(&sp->isr_cnt);
+ tx_intr_handler(fifo);
+ atomic_dec(&sp->isr_cnt);
+ return IRQ_HANDLED;
+}
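
Unlike s2io_msi_handle() and s2io_isr(), which scan every ring and fifo on each interrupt, the two MSI-X handlers above receive the specific ring_info_t or fifo_info_t as dev_id and service only that object. A minimal sketch of how one per-ring vector is wired up, mirroring the request_irq() loop this patch adds to s2io_open() (the entry index and name string are illustrative):

	/* Sketch: given a nic_t *sp, attach ring 0 to MSI-X entry 1        */
	/* (entry 0 is left unassigned by the setup loop in this patch).    */
	ring_info_t *ring = &sp->mac_control.rings[0];
	int err = request_irq(sp->entries[1].vector, s2io_msix_ring_handle,
			      0 /* exclusive, not SA_SHIRQ */, "eth0:MSI-X-1-RX", ring);
	if (err)
		DBG_PRINT(ERR_DBG, "MSI-X-1 registration failed: %d\n", err);
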
+
static void s2io_txpic_intr_handle(nic_t *sp)
{
XENA_dev_config_t __iomem *bar0 = sp->bar0;
@@ -3778,11 +4179,10 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
{
nic_t *sp = dev->priv;
- strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
- strncpy(info->version, s2io_driver_version,
- sizeof(s2io_driver_version));
- strncpy(info->fw_version, "", 32);
- strncpy(info->bus_info, pci_name(sp->pdev), 32);
+ strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strncpy(info->version, s2io_driver_version, sizeof(info->version));
+ strncpy(info->fw_version, "", sizeof(info->fw_version));
+ strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
info->regdump_len = XENA_REG_SPACE;
info->eedump_len = XENA_EEPROM_SPACE;
info->testinfo_len = S2IO_TEST_LEN;
@@ -3978,29 +4378,53 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
*/
#define S2IO_DEV_ID 5
-static int read_eeprom(nic_t * sp, int off, u32 * data)
+static int read_eeprom(nic_t * sp, int off, u64 * data)
{
int ret = -1;
u32 exit_cnt = 0;
u64 val64;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
- val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
- I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
- I2C_CONTROL_CNTL_START;
- SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+ if (sp->device_type == XFRAME_I_DEVICE) {
+ val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
+ I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
+ I2C_CONTROL_CNTL_START;
+ SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
- while (exit_cnt < 5) {
- val64 = readq(&bar0->i2c_control);
- if (I2C_CONTROL_CNTL_END(val64)) {
- *data = I2C_CONTROL_GET_DATA(val64);
- ret = 0;
- break;
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->i2c_control);
+ if (I2C_CONTROL_CNTL_END(val64)) {
+ *data = I2C_CONTROL_GET_DATA(val64);
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
}
- msleep(50);
- exit_cnt++;
}
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
+ SPI_CONTROL_BYTECNT(0x3) |
+ SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
+ SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+ val64 |= SPI_CONTROL_REQ;
+ SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->spi_control);
+ if (val64 & SPI_CONTROL_NACK) {
+ ret = 1;
+ break;
+ } else if (val64 & SPI_CONTROL_DONE) {
+ *data = readq(&bar0->spi_data);
+ *data &= 0xffffff;
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
+ }
+ }
return ret;
}
@@ -4019,28 +4443,53 @@ static int read_eeprom(nic_t * sp, int off, u32 * data)
* 0 on success, -1 on failure.
*/
-static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
+static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
{
int exit_cnt = 0, ret = -1;
u64 val64;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
- val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
- I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
- I2C_CONTROL_CNTL_START;
- SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+ if (sp->device_type == XFRAME_I_DEVICE) {
+ val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
+ I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
+ I2C_CONTROL_CNTL_START;
+ SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->i2c_control);
+ if (I2C_CONTROL_CNTL_END(val64)) {
+ if (!(val64 & I2C_CONTROL_NACK))
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
+ }
+ }
- while (exit_cnt < 5) {
- val64 = readq(&bar0->i2c_control);
- if (I2C_CONTROL_CNTL_END(val64)) {
- if (!(val64 & I2C_CONTROL_NACK))
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ int write_cnt = (cnt == 8) ? 0 : cnt;
+ writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
+
+ val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
+ SPI_CONTROL_BYTECNT(write_cnt) |
+ SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
+ SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+ val64 |= SPI_CONTROL_REQ;
+ SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->spi_control);
+ if (val64 & SPI_CONTROL_NACK) {
+ ret = 1;
+ break;
+ } else if (val64 & SPI_CONTROL_DONE) {
ret = 0;
- break;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
}
- msleep(50);
- exit_cnt++;
}
-
return ret;
}
@@ -4060,7 +4509,8 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
static int s2io_ethtool_geeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 * data_buf)
{
- u32 data, i, valid;
+ u32 i, valid;
+ u64 data;
nic_t *sp = dev->priv;
eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
@@ -4098,7 +4548,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
u8 * data_buf)
{
int len = eeprom->len, cnt = 0;
- u32 valid = 0, data;
+ u64 valid = 0, data;
nic_t *sp = dev->priv;
if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
@@ -4146,7 +4596,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
static int s2io_register_test(nic_t * sp, uint64_t * data)
{
XENA_dev_config_t __iomem *bar0 = sp->bar0;
- u64 val64 = 0;
+ u64 val64 = 0, exp_val;
int fail = 0;
val64 = readq(&bar0->pif_rd_swapper_fb);
@@ -4162,7 +4612,11 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
}
val64 = readq(&bar0->rx_queue_cfg);
- if (val64 != 0x0808080808080808ULL) {
+ if (sp->device_type == XFRAME_II_DEVICE)
+ exp_val = 0x0404040404040404ULL;
+ else
+ exp_val = 0x0808080808080808ULL;
+ if (val64 != exp_val) {
fail = 1;
DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
}
@@ -4190,7 +4644,7 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
}
*data = fail;
- return 0;
+ return fail;
}
/**
@@ -4209,58 +4663,83 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
{
int fail = 0;
- u32 ret_data;
+ u64 ret_data, org_4F0, org_7F0;
+ u8 saved_4F0 = 0, saved_7F0 = 0;
+ struct net_device *dev = sp->dev;
/* Test Write Error at offset 0 */
- if (!write_eeprom(sp, 0, 0, 3))
- fail = 1;
+	/* Note that the SPI interface allows write access to all areas
+	 * of the EEPROM, hence negative testing is done only on Xframe I.
+ */
+ if (sp->device_type == XFRAME_I_DEVICE)
+ if (!write_eeprom(sp, 0, 0, 3))
+ fail = 1;
+
+ /* Save current values at offsets 0x4F0 and 0x7F0 */
+ if (!read_eeprom(sp, 0x4F0, &org_4F0))
+ saved_4F0 = 1;
+ if (!read_eeprom(sp, 0x7F0, &org_7F0))
+ saved_7F0 = 1;
/* Test Write at offset 4f0 */
- if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
+ if (write_eeprom(sp, 0x4F0, 0x012345, 3))
fail = 1;
if (read_eeprom(sp, 0x4F0, &ret_data))
fail = 1;
- if (ret_data != 0x01234567)
+ if (ret_data != 0x012345) {
+ DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data);
fail = 1;
+ }
/* Reset the EEPROM data go FFFF */
- write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
+ write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
/* Test Write Request Error at offset 0x7c */
- if (!write_eeprom(sp, 0x07C, 0, 3))
- fail = 1;
+ if (sp->device_type == XFRAME_I_DEVICE)
+ if (!write_eeprom(sp, 0x07C, 0, 3))
+ fail = 1;
- /* Test Write Request at offset 0x7fc */
- if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
+ /* Test Write Request at offset 0x7f0 */
+ if (write_eeprom(sp, 0x7F0, 0x012345, 3))
fail = 1;
- if (read_eeprom(sp, 0x7FC, &ret_data))
+ if (read_eeprom(sp, 0x7F0, &ret_data))
fail = 1;
- if (ret_data != 0x01234567)
+ if (ret_data != 0x012345) {
+ DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data);
fail = 1;
+ }
/* Reset the EEPROM data go FFFF */
- write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
+ write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
- /* Test Write Error at offset 0x80 */
- if (!write_eeprom(sp, 0x080, 0, 3))
- fail = 1;
+ if (sp->device_type == XFRAME_I_DEVICE) {
+ /* Test Write Error at offset 0x80 */
+ if (!write_eeprom(sp, 0x080, 0, 3))
+ fail = 1;
- /* Test Write Error at offset 0xfc */
- if (!write_eeprom(sp, 0x0FC, 0, 3))
- fail = 1;
+ /* Test Write Error at offset 0xfc */
+ if (!write_eeprom(sp, 0x0FC, 0, 3))
+ fail = 1;
- /* Test Write Error at offset 0x100 */
- if (!write_eeprom(sp, 0x100, 0, 3))
- fail = 1;
+ /* Test Write Error at offset 0x100 */
+ if (!write_eeprom(sp, 0x100, 0, 3))
+ fail = 1;
- /* Test Write Error at offset 4ec */
- if (!write_eeprom(sp, 0x4EC, 0, 3))
- fail = 1;
+ /* Test Write Error at offset 4ec */
+ if (!write_eeprom(sp, 0x4EC, 0, 3))
+ fail = 1;
+ }
+
+ /* Restore values at offsets 0x4F0 and 0x7F0 */
+ if (saved_4F0)
+ write_eeprom(sp, 0x4F0, org_4F0, 3);
+ if (saved_7F0)
+ write_eeprom(sp, 0x7F0, org_7F0, 3);
*data = fail;
- return 0;
+ return fail;
}
/**
@@ -4342,7 +4821,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
{
XENA_dev_config_t __iomem *bar0 = sp->bar0;
u64 val64;
- int cnt, iteration = 0, test_pass = 0;
+ int cnt, iteration = 0, test_fail = 0;
val64 = readq(&bar0->adapter_control);
val64 &= ~ADAPTER_ECC_EN;
@@ -4350,7 +4829,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
val64 = readq(&bar0->mc_rldram_test_ctrl);
val64 |= MC_RLDRAM_TEST_MODE;
- writeq(val64, &bar0->mc_rldram_test_ctrl);
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
val64 = readq(&bar0->mc_rldram_mrs);
val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
@@ -4378,17 +4857,12 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
}
writeq(val64, &bar0->mc_rldram_test_d2);
- val64 = (u64) (0x0000003fffff0000ULL);
+ val64 = (u64) (0x0000003ffffe0100ULL);
writeq(val64, &bar0->mc_rldram_test_add);
-
- val64 = MC_RLDRAM_TEST_MODE;
- writeq(val64, &bar0->mc_rldram_test_ctrl);
-
- val64 |=
- MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
- MC_RLDRAM_TEST_GO;
- writeq(val64, &bar0->mc_rldram_test_ctrl);
+ val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
+ MC_RLDRAM_TEST_GO;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
for (cnt = 0; cnt < 5; cnt++) {
val64 = readq(&bar0->mc_rldram_test_ctrl);
@@ -4400,11 +4874,8 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
if (cnt == 5)
break;
- val64 = MC_RLDRAM_TEST_MODE;
- writeq(val64, &bar0->mc_rldram_test_ctrl);
-
- val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
- writeq(val64, &bar0->mc_rldram_test_ctrl);
+ val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
for (cnt = 0; cnt < 5; cnt++) {
val64 = readq(&bar0->mc_rldram_test_ctrl);
@@ -4417,18 +4888,18 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
break;
val64 = readq(&bar0->mc_rldram_test_ctrl);
- if (val64 & MC_RLDRAM_TEST_PASS)
- test_pass = 1;
+ if (!(val64 & MC_RLDRAM_TEST_PASS))
+ test_fail = 1;
iteration++;
}
- if (!test_pass)
- *data = 1;
- else
- *data = 0;
+ *data = test_fail;
- return 0;
+ /* Bring the adapter out of test mode */
+ SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
+
+ return test_fail;
}
/**
@@ -4932,7 +5403,7 @@ static void s2io_card_down(nic_t * sp)
static int s2io_card_up(nic_t * sp)
{
- int i, ret;
+ int i, ret = 0;
mac_info_t *mac_control;
struct config_param *config;
struct net_device *dev = (struct net_device *) sp->dev;
@@ -4944,6 +5415,15 @@ static int s2io_card_up(nic_t * sp)
return -ENODEV;
}
+ if (sp->intr_type == MSI)
+ ret = s2io_enable_msi(sp);
+ else if (sp->intr_type == MSI_X)
+ ret = s2io_enable_msi_x(sp);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
+ sp->intr_type = INTA;
+ }
+
/*
* Initializing the Rx buffers. For now we are considering only 1
* Rx ring and initializing buffers into 30 Rx blocks
@@ -5228,6 +5708,8 @@ static void s2io_init_pci(nic_t * sp)
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
module_param(tx_fifo_num, int, 0);
module_param(rx_ring_num, int, 0);
module_param_array(tx_fifo_len, uint, NULL, 0);
@@ -5245,6 +5727,7 @@ module_param(bimodal, bool, 0);
module_param(indicate_max_pkts, int, 0);
#endif
module_param(rxsync_frequency, int, 0);
+module_param(intr_type, int, 0);
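
The new intr_type module parameter selects the interrupt mechanism using the INTA/MSI/MSI_X values defined in s2io.h below (0, 1 and 2 respectively); loading the driver with, for example, intr_type=2 requests MSI-X, and the probe and card-up paths above fall back to INTA when the device or the allocation cannot support the requested mode. The parameter's default is set where intr_type is declared, outside the hunks shown here.
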
/**
* s2io_init_nic - Initialization of the adapter .
@@ -5274,9 +5757,16 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
mac_info_t *mac_control;
struct config_param *config;
int mode;
+ u8 dev_intr_type = intr_type;
#ifdef CONFIG_S2IO_NAPI
- DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
+ if (dev_intr_type != INTA) {
+ DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
+is enabled. Defaulting to INTA\n");
+ dev_intr_type = INTA;
+ }
+ else
+ DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
#endif
if ((ret = pci_enable_device(pdev))) {
@@ -5303,10 +5793,35 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
return -ENOMEM;
}
- if (pci_request_regions(pdev, s2io_driver_name)) {
- DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
- pci_disable_device(pdev);
- return -ENODEV;
+ if ((dev_intr_type == MSI_X) &&
+ ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
+ (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
+ DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
+Defaulting to INTA\n");
+ dev_intr_type = INTA;
+ }
+ if (dev_intr_type != MSI_X) {
+ if (pci_request_regions(pdev, s2io_driver_name)) {
+ DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
+ pci_disable_device(pdev);
+ return -ENODEV;
+ }
+ }
+ else {
+ if (!(request_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0), s2io_driver_name))) {
+ DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
+ pci_disable_device(pdev);
+ return -ENODEV;
+ }
+ if (!(request_mem_region(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2), s2io_driver_name))) {
+ DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ pci_disable_device(pdev);
+ return -ENODEV;
+ }
}
dev = alloc_etherdev(sizeof(nic_t));
@@ -5329,6 +5844,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
sp->pdev = pdev;
sp->high_dma_flag = dma_flag;
sp->device_enabled_once = FALSE;
+ sp->intr_type = dev_intr_type;
if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
(pdev->device == PCI_DEVICE_ID_HERC_UNI))
@@ -5336,6 +5852,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
else
sp->device_type = XFRAME_I_DEVICE;
+
/* Initialize some PCI/PCI-X fields of the NIC. */
s2io_init_pci(sp);
@@ -5571,12 +6088,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
if (sp->device_type & XFRAME_II_DEVICE) {
DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
dev->name);
- DBG_PRINT(ERR_DBG, "(rev %d), %s",
+ DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
get_xena_rev_id(sp->pdev),
s2io_driver_version);
#ifdef CONFIG_2BUFF_MODE
DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
#endif
+ switch(sp->intr_type) {
+ case INTA:
+ DBG_PRINT(ERR_DBG, ", Intr type INTA");
+ break;
+ case MSI:
+ DBG_PRINT(ERR_DBG, ", Intr type MSI");
+ break;
+ case MSI_X:
+ DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
+ break;
+ }
DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -5595,12 +6123,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
} else {
DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
dev->name);
- DBG_PRINT(ERR_DBG, "(rev %d), %s",
+ DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
get_xena_rev_id(sp->pdev),
s2io_driver_version);
#ifdef CONFIG_2BUFF_MODE
DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
#endif
+ switch(sp->intr_type) {
+ case INTA:
+ DBG_PRINT(ERR_DBG, ", Intr type INTA");
+ break;
+ case MSI:
+ DBG_PRINT(ERR_DBG, ", Intr type MSI");
+ break;
+ case MSI_X:
+ DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
+ break;
+ }
DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
sp->def_mac_addr[0].mac_addr[0],
@@ -5644,7 +6183,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
mem_alloc_failed:
free_shared_mem(sp);
pci_disable_device(pdev);
- pci_release_regions(pdev);
+ if (dev_intr_type != MSI_X)
+ pci_release_regions(pdev);
+ else {
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ release_mem_region(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ }
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
@@ -5678,7 +6224,14 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
iounmap(sp->bar0);
iounmap(sp->bar1);
pci_disable_device(pdev);
- pci_release_regions(pdev);
+ if (sp->intr_type != MSI_X)
+ pci_release_regions(pdev);
+ else {
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ release_mem_region(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ }
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 89151cb5218..1cc24b56760 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -652,6 +652,30 @@ typedef struct {
#define SMALL_BLK_CNT 30
#define LARGE_BLK_CNT 100
+/*
+ * Structure to keep track of the MSI-X vectors and the corresponding
+ * argument registered against each vector
+ */
+#define MAX_REQUESTED_MSI_X 17
+struct s2io_msix_entry
+{
+ u16 vector;
+ u16 entry;
+ void *arg;
+
+ u8 type;
+#define MSIX_FIFO_TYPE 1
+#define MSIX_RING_TYPE 2
+
+ u8 in_use;
+#define MSIX_REGISTERED_SUCCESS 0xAA
+};
+
+struct msix_info_st {
+ u64 addr;
+ u64 data;
+};
+
/* Structure representing one instance of the NIC */
struct s2io_nic {
#ifdef CONFIG_S2IO_NAPI
@@ -719,13 +743,8 @@ struct s2io_nic {
* a schedule task that will set the correct Link state once the
* NIC's PHY has stabilized after a state change.
*/
-#ifdef INIT_TQUEUE
- struct tq_struct rst_timer_task;
- struct tq_struct set_link_task;
-#else
struct work_struct rst_timer_task;
struct work_struct set_link_task;
-#endif
/* Flag that can be used to turn on or turn off the Rx checksum
* offload feature.
@@ -748,10 +767,23 @@ struct s2io_nic {
atomic_t card_state;
volatile unsigned long link_state;
struct vlan_group *vlgrp;
+#define MSIX_FLG 0xA5
+ struct msix_entry *entries;
+ struct s2io_msix_entry *s2io_entries;
+ char desc1[35];
+ char desc2[35];
+
+ struct msix_info_st msix_info[0x3f];
+
#define XFRAME_I_DEVICE 1
#define XFRAME_II_DEVICE 2
u8 device_type;
+#define INTA 0
+#define MSI 1
+#define MSI_X 2
+ u8 intr_type;
+
spinlock_t rx_lock;
atomic_t isr_cnt;
};
@@ -886,6 +918,13 @@ static int s2io_poll(struct net_device *dev, int *budget);
static void s2io_init_pci(nic_t * sp);
int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
static void s2io_alarm_handle(unsigned long data);
+static int s2io_enable_msi(nic_t *nic);
+static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t
+s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t
+s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs);
+int s2io_enable_msi_x(nic_t *nic);
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
static struct ethtool_ops netdev_ethtool_ops;
@@ -894,4 +933,5 @@ int s2io_set_swapper(nic_t * sp);
static void s2io_card_down(nic_t *nic);
static int s2io_card_up(nic_t *nic);
int get_xena_rev_id(struct pci_dev *pdev);
+void restore_xmsi_data(nic_t *nic);
#endif /* _S2IO_H */
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 7abd55a4fb2..aa4ca182175 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -10,7 +10,7 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -118,8 +118,6 @@ MODULE_PARM_DESC(int_timeout, "Timeout value");
********************************************************************* */
-typedef unsigned long sbmac_port_t;
-
typedef enum { sbmac_speed_auto, sbmac_speed_10,
sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
@@ -129,7 +127,7 @@ typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
-typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
+typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
sbmac_state_broken } sbmac_state_t;
@@ -144,17 +142,13 @@ typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
-#define SBMAC_READCSR(t) __raw_readq((unsigned long)t)
-#define SBMAC_WRITECSR(t,v) __raw_writeq(v, (unsigned long)t)
-
-
#define SBMAC_MAX_TXDESCR 32
#define SBMAC_MAX_RXDESCR 32
#define ETHER_ALIGN 2
#define ETHER_ADDR_LEN 6
-#define ENET_PACKET_SIZE 1518
-/*#define ENET_PACKET_SIZE 9216 */
+#define ENET_PACKET_SIZE 1518
+/*#define ENET_PACKET_SIZE 9216 */
/**********************************************************************
* DMA Descriptor structure
@@ -172,12 +166,12 @@ typedef unsigned long paddr_t;
********************************************************************* */
typedef struct sbmacdma_s {
-
- /*
+
+ /*
* This stuff is used to identify the channel and the registers
* associated with it.
*/
-
+
struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
int sbdma_channel; /* channel number */
int sbdma_txdir; /* direction (1=transmit) */
@@ -187,21 +181,21 @@ typedef struct sbmacdma_s {
int sbdma_int_timeout; /* # usec rx/tx interrupt */
#endif
- sbmac_port_t sbdma_config0; /* DMA config register 0 */
- sbmac_port_t sbdma_config1; /* DMA config register 1 */
- sbmac_port_t sbdma_dscrbase; /* Descriptor base address */
- sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */
- sbmac_port_t sbdma_curdscr; /* current descriptor address */
-
+ volatile void __iomem *sbdma_config0; /* DMA config register 0 */
+ volatile void __iomem *sbdma_config1; /* DMA config register 1 */
+ volatile void __iomem *sbdma_dscrbase; /* Descriptor base address */
+ volatile void __iomem *sbdma_dscrcnt; /* Descriptor count register */
+ volatile void __iomem *sbdma_curdscr; /* current descriptor address */
+
/*
* This stuff is for maintenance of the ring
*/
-
+
sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */
-
+
struct sk_buff **sbdma_ctxtable; /* context table, one per descr */
-
+
paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */
sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */
@@ -213,15 +207,15 @@ typedef struct sbmacdma_s {
********************************************************************* */
struct sbmac_softc {
-
+
/*
* Linux-specific things
*/
-
+
struct net_device *sbm_dev; /* pointer to linux device */
spinlock_t sbm_lock; /* spin lock */
struct timer_list sbm_timer; /* for monitoring MII */
- struct net_device_stats sbm_stats;
+ struct net_device_stats sbm_stats;
int sbm_devflags; /* current device flags */
int sbm_phy_oldbmsr;
@@ -229,31 +223,31 @@ struct sbmac_softc {
int sbm_phy_oldk1stsr;
int sbm_phy_oldlinkstat;
int sbm_buffersize;
-
+
unsigned char sbm_phys[2];
-
+
/*
* Controller-specific things
*/
-
- unsigned long sbm_base; /* MAC's base address */
+
+ volatile void __iomem *sbm_base; /* MAC's base address */
sbmac_state_t sbm_state; /* current state */
-
- sbmac_port_t sbm_macenable; /* MAC Enable Register */
- sbmac_port_t sbm_maccfg; /* MAC Configuration Register */
- sbmac_port_t sbm_fifocfg; /* FIFO configuration register */
- sbmac_port_t sbm_framecfg; /* Frame configuration register */
- sbmac_port_t sbm_rxfilter; /* receive filter register */
- sbmac_port_t sbm_isr; /* Interrupt status register */
- sbmac_port_t sbm_imr; /* Interrupt mask register */
- sbmac_port_t sbm_mdio; /* MDIO register */
-
+
+ volatile void __iomem *sbm_macenable; /* MAC Enable Register */
+ volatile void __iomem *sbm_maccfg; /* MAC Configuration Register */
+ volatile void __iomem *sbm_fifocfg; /* FIFO configuration register */
+ volatile void __iomem *sbm_framecfg; /* Frame configuration register */
+ volatile void __iomem *sbm_rxfilter; /* receive filter register */
+ volatile void __iomem *sbm_isr; /* Interrupt status register */
+ volatile void __iomem *sbm_imr; /* Interrupt mask register */
+ volatile void __iomem *sbm_mdio; /* MDIO register */
+
sbmac_speed_t sbm_speed; /* current speed */
sbmac_duplex_t sbm_duplex; /* current duplex */
sbmac_fc_t sbm_fc; /* current flow control setting */
-
+
unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
-
+
sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
sbmacdma_t sbm_rxdma;
int rx_hw_checksum;
@@ -302,6 +296,7 @@ static void sbmac_set_rx_mode(struct net_device *dev);
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sbmac_close(struct net_device *dev);
static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
+static int sbmac_mii_probe(struct net_device *dev);
static void sbmac_mii_sync(struct sbmac_softc *s);
static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt);
@@ -439,6 +434,9 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
#define MII_BMCR 0x00 /* Basic mode control register (rw) */
#define MII_BMSR 0x01 /* Basic mode status register (ro) */
+#define MII_PHYIDR1 0x02
+#define MII_PHYIDR2 0x03
+
#define MII_K1STSR 0x0A /* 1K Status Register (ro) */
#define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */
@@ -450,13 +448,13 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
/**********************************************************************
* SBMAC_MII_SYNC(s)
- *
+ *
* Synchronize with the MII - send a pattern of bits to the MII
* that will guarantee that it is ready to accept a command.
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac structure
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -467,25 +465,25 @@ static void sbmac_mii_sync(struct sbmac_softc *s)
uint64_t bits;
int mac_mdio_genc;
- mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
-
+ mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
+
bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
-
- SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
-
+
+ __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
+
for (cnt = 0; cnt < 32; cnt++) {
- SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc);
- SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+ __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+ __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
}
}
/**********************************************************************
* SBMAC_MII_SENDDATA(s,data,bitcnt)
- *
+ *
* Send some bits to the MII. The bits to be sent are right-
* justified in the 'data' parameter.
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac structure
* data - data to send
* bitcnt - number of bits to send
@@ -498,20 +496,20 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc
unsigned int curmask;
int mac_mdio_genc;
- mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
-
+ mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
+
bits = M_MAC_MDIO_DIR_OUTPUT;
- SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
-
+ __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
+
curmask = 1 << (bitcnt - 1);
-
+
for (i = 0; i < bitcnt; i++) {
if (data & curmask)
bits |= M_MAC_MDIO_OUT;
else bits &= ~M_MAC_MDIO_OUT;
- SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
- SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc);
- SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc);
+ __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
+ __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+ __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
curmask >>= 1;
}
}
@@ -520,14 +518,14 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc
/**********************************************************************
* SBMAC_MII_READ(s,phyaddr,regidx)
- *
+ *
* Read a PHY register.
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac structure
* phyaddr - PHY's address
* regidx = index of register to read
- *
+ *
* Return value:
* value read, or 0 if an error occurred.
********************************************************************* */
@@ -543,9 +541,9 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
* Synchronize ourselves so that the PHY knows the next
* thing coming down is a command
*/
-
+
sbmac_mii_sync(s);
-
+
/*
* Send the data to the PHY. The sequence is
* a "start" command (2 bits)
@@ -553,59 +551,55 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
* the PHY addr (5 bits)
* the register index (5 bits)
*/
-
+
sbmac_mii_senddata(s,MII_COMMAND_START, 2);
sbmac_mii_senddata(s,MII_COMMAND_READ, 2);
sbmac_mii_senddata(s,phyaddr, 5);
sbmac_mii_senddata(s,regidx, 5);
-
- mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
-
- /*
+
+ mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
+
+ /*
* Switch the port around without a clock transition.
*/
- SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
-
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
+
/*
* Send out a clock pulse to signal we want the status
*/
-
- SBMAC_WRITECSR(s->sbm_mdio,
- M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc);
- SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
-
- /*
+
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
+
+ /*
* If an error occurred, the PHY will signal '1' back
*/
- error = SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN;
-
- /*
+ error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN;
+
+ /*
* Issue an 'idle' clock pulse, but keep the direction
* the same.
*/
- SBMAC_WRITECSR(s->sbm_mdio,
- M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc);
- SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
-
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
+
regval = 0;
-
+
for (idx = 0; idx < 16; idx++) {
regval <<= 1;
-
+
if (error == 0) {
- if (SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN)
+ if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN)
regval |= 1;
}
-
- SBMAC_WRITECSR(s->sbm_mdio,
- M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc);
- SBMAC_WRITECSR(s->sbm_mdio,
- M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
+
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
}
-
+
/* Switch back to output */
- SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc);
-
+ __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
+
if (error == 0)
return regval;
return 0;
@@ -614,15 +608,15 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
/**********************************************************************
* SBMAC_MII_WRITE(s,phyaddr,regidx,regval)
- *
+ *
* Write a value to a PHY register.
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac structure
* phyaddr - PHY to use
* regidx - register within the PHY
* regval - data to write to register
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -633,7 +627,7 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
int mac_mdio_genc;
sbmac_mii_sync(s);
-
+
sbmac_mii_senddata(s,MII_COMMAND_START,2);
sbmac_mii_senddata(s,MII_COMMAND_WRITE,2);
sbmac_mii_senddata(s,phyaddr, 5);
@@ -641,27 +635,27 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
sbmac_mii_senddata(s,MII_COMMAND_ACK,2);
sbmac_mii_senddata(s,regval,16);
- mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC;
+ mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
- SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc);
+ __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
}
/**********************************************************************
* SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
- *
+ *
* Initialize a DMA channel context. Since there are potentially
* eight DMA channels per MAC, it's nice to do this in a standard
- * way.
- *
- * Input parameters:
+ * way.
+ *
+ * Input parameters:
* d - sbmacdma_t structure (DMA channel context)
* s - sbmac_softc structure (pointer to a MAC)
* chan - channel number (0..1 right now)
* txrx - Identifies DMA_TX or DMA_RX for channel direction
* maxdescr - number of descriptors
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -672,101 +666,87 @@ static void sbdma_initctx(sbmacdma_t *d,
int txrx,
int maxdescr)
{
- /*
- * Save away interesting stuff in the structure
+ /*
+ * Save away interesting stuff in the structure
*/
-
+
d->sbdma_eth = s;
d->sbdma_channel = chan;
d->sbdma_txdir = txrx;
-
+
#if 0
/* RMON clearing */
s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
#endif
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)), 0);
- SBMAC_WRITECSR(IOADDR(
- A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)), 0);
-
- /*
- * initialize register pointers
- */
-
- d->sbdma_config0 =
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)));
+ __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)));
+
+ /*
+ * initialize register pointers
+ */
+
+ d->sbdma_config0 =
s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
- d->sbdma_config1 =
+ d->sbdma_config1 =
s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
- d->sbdma_dscrbase =
+ d->sbdma_dscrbase =
s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
- d->sbdma_dscrcnt =
+ d->sbdma_dscrcnt =
s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
- d->sbdma_curdscr =
+ d->sbdma_curdscr =
s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
-
+
/*
* Allocate memory for the ring
*/
-
+
d->sbdma_maxdescr = maxdescr;
-
- d->sbdma_dscrtable = (sbdmadscr_t *)
- kmalloc(d->sbdma_maxdescr*sizeof(sbdmadscr_t), GFP_KERNEL);
-
+
+ d->sbdma_dscrtable = (sbdmadscr_t *)
+ kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);
+
+ /*
+ * The descriptor table must be aligned to at least 16 bytes or the
+ * MAC will corrupt it.
+ */
+ d->sbdma_dscrtable = (sbdmadscr_t *)
+ ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t));
+
memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t));
-
+
d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
-
+
d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
-
+
/*
* And context table
*/
-
- d->sbdma_ctxtable = (struct sk_buff **)
+
+ d->sbdma_ctxtable = (struct sk_buff **)
kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL);
-
+
memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *));
-
+
#ifdef CONFIG_SBMAC_COALESCE
/*
* Setup Rx/Tx DMA coalescing defaults
@@ -777,7 +757,7 @@ static void sbdma_initctx(sbmacdma_t *d,
} else {
d->sbdma_int_pktcnt = 1;
}
-
+
if ( int_timeout ) {
d->sbdma_int_timeout = int_timeout;
} else {
@@ -789,13 +769,13 @@ static void sbdma_initctx(sbmacdma_t *d,
/**********************************************************************
* SBDMA_CHANNEL_START(d)
- *
+ *
* Initialize the hardware registers for a DMA channel.
- *
- * Input parameters:
+ *
+ * Input parameters:
* d - DMA channel to init (context must be previously init'd
* rxtx - DMA_RX or DMA_TX depending on what type of channel
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -805,24 +785,21 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
/*
* Turn on the DMA channel
*/
-
+
#ifdef CONFIG_SBMAC_COALESCE
- SBMAC_WRITECSR(d->sbdma_config1,
- V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
- 0);
- SBMAC_WRITECSR(d->sbdma_config0,
- M_DMA_EOP_INT_EN |
+ __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
+ 0, d->sbdma_config1);
+ __raw_writeq(M_DMA_EOP_INT_EN |
V_DMA_RINGSZ(d->sbdma_maxdescr) |
V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
- 0);
+ 0, d->sbdma_config0);
#else
- SBMAC_WRITECSR(d->sbdma_config1,0);
- SBMAC_WRITECSR(d->sbdma_config0,
- V_DMA_RINGSZ(d->sbdma_maxdescr) |
- 0);
+ __raw_writeq(0, d->sbdma_config1);
+ __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
+ 0, d->sbdma_config0);
#endif
- SBMAC_WRITECSR(d->sbdma_dscrbase,d->sbdma_dscrtable_phys);
+ __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
/*
* Initialize ring pointers
@@ -834,12 +811,12 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
/**********************************************************************
* SBDMA_CHANNEL_STOP(d)
- *
+ *
* Initialize the hardware registers for a DMA channel.
- *
- * Input parameters:
+ *
+ * Input parameters:
* d - DMA channel to init (context must be previously init'd
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -849,44 +826,44 @@ static void sbdma_channel_stop(sbmacdma_t *d)
/*
* Turn off the DMA channel
*/
-
- SBMAC_WRITECSR(d->sbdma_config1,0);
-
- SBMAC_WRITECSR(d->sbdma_dscrbase,0);
-
- SBMAC_WRITECSR(d->sbdma_config0,0);
-
+
+ __raw_writeq(0, d->sbdma_config1);
+
+ __raw_writeq(0, d->sbdma_dscrbase);
+
+ __raw_writeq(0, d->sbdma_config0);
+
/*
* Zero ring pointers
*/
-
- d->sbdma_addptr = 0;
- d->sbdma_remptr = 0;
+
+ d->sbdma_addptr = NULL;
+ d->sbdma_remptr = NULL;
}
static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
{
unsigned long addr;
unsigned long newaddr;
-
+
addr = (unsigned long) skb->data;
-
+
newaddr = (addr + power2 - 1) & ~(power2 - 1);
-
+
skb_reserve(skb,newaddr-addr+offset);
}
/**********************************************************************
* SBDMA_ADD_RCVBUFFER(d,sb)
- *
+ *
* Add a buffer to the specified DMA channel. For receive channels,
* this queues a buffer for inbound packets.
- *
- * Input parameters:
+ *
+ * Input parameters:
* d - DMA channel descriptor
* sb - sk_buff to add, or NULL if we should allocate one
- *
+ *
* Return value:
* 0 if buffer could not be added (ring is full)
* 1 if buffer added successfully
@@ -899,24 +876,24 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
sbdmadscr_t *nextdsc;
struct sk_buff *sb_new = NULL;
int pktsize = ENET_PACKET_SIZE;
-
+
/* get pointer to our current place in the ring */
-
+
dsc = d->sbdma_addptr;
nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
-
+
/*
* figure out if the ring is full - if the next descriptor
* is the same as the one that we're going to remove from
* the ring, the ring is full
*/
-
+
if (nextdsc == d->sbdma_remptr) {
return -ENOSPC;
}
- /*
- * Allocate a sk_buff if we don't already have one.
+ /*
+ * Allocate a sk_buff if we don't already have one.
* If we do have an sk_buff, reset it so that it's empty.
*
* Note: sk_buffs don't seem to be guaranteed to have any sort
@@ -925,7 +902,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
*
* 1. the data does not start in the middle of a cache line.
* 2. The data does not end in the middle of a cache line
- * 3. The buffer can be aligned such that the IP addresses are
+ * 3. The buffer can be aligned such that the IP addresses are
* naturally aligned.
*
* Remember, the SOCs MAC writes whole cache lines at a time,
@@ -933,7 +910,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
* data portion starts in the middle of a cache line, the SOC
* DMA will trash the beginning (and ending) portions.
*/
-
+
if (sb == NULL) {
sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
if (sb_new == NULL) {
@@ -949,23 +926,22 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
}
else {
sb_new = sb;
- /*
+ /*
* nothing special to reinit buffer, it's already aligned
* and sb->data already points to a good place.
*/
}
-
+
/*
- * fill in the descriptor
+ * fill in the descriptor
*/
-
+
#ifdef CONFIG_SBMAC_COALESCE
/*
* Do not interrupt per DMA transfer.
*/
dsc->dscr_a = virt_to_phys(sb_new->data) |
- V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
- 0;
+ V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
#else
dsc->dscr_a = virt_to_phys(sb_new->data) |
V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
@@ -974,38 +950,38 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
/* receiving: no options */
dsc->dscr_b = 0;
-
+
/*
- * fill in the context
+ * fill in the context
*/
-
+
d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
-
- /*
- * point at next packet
+
+ /*
+ * point at next packet
*/
-
+
d->sbdma_addptr = nextdsc;
-
- /*
+
+ /*
* Give the buffer to the DMA engine.
*/
-
- SBMAC_WRITECSR(d->sbdma_dscrcnt,1);
-
+
+ __raw_writeq(1, d->sbdma_dscrcnt);
+
return 0; /* we did it */
}
/**********************************************************************
* SBDMA_ADD_TXBUFFER(d,sb)
- *
+ *
* Add a transmit buffer to the specified DMA channel, causing a
* transmit to start.
- *
- * Input parameters:
+ *
+ * Input parameters:
* d - DMA channel descriptor
* sb - sk_buff to add
- *
+ *
* Return value:
* 0 transmit queued successfully
* otherwise error code
@@ -1019,70 +995,70 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
uint64_t phys;
uint64_t ncb;
int length;
-
+
/* get pointer to our current place in the ring */
-
+
dsc = d->sbdma_addptr;
nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
-
+
/*
* figure out if the ring is full - if the next descriptor
* is the same as the one that we're going to remove from
* the ring, the ring is full
*/
-
+
if (nextdsc == d->sbdma_remptr) {
return -ENOSPC;
}
-
+
/*
* Under Linux, it's not necessary to copy/coalesce buffers
* like it is on NetBSD. We think they're all contiguous,
* but that may not be true for GBE.
*/
-
+
length = sb->len;
-
+
/*
* fill in the descriptor. Note that the number of cache
* blocks in the descriptor is the number of blocks
* *spanned*, so we need to add in the offset (if any)
* while doing the calculation.
*/
-
+
phys = virt_to_phys(sb->data);
ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
- dsc->dscr_a = phys |
+ dsc->dscr_a = phys |
V_DMA_DSCRA_A_SIZE(ncb) |
#ifndef CONFIG_SBMAC_COALESCE
M_DMA_DSCRA_INTERRUPT |
#endif
M_DMA_ETHTX_SOP;
-
+
/* transmitting: set outbound options and length */
dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
V_DMA_DSCRB_PKT_SIZE(length);
-
+
/*
- * fill in the context
+ * fill in the context
*/
-
+
d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
-
- /*
- * point at next packet
+
+ /*
+ * point at next packet
*/
-
+
d->sbdma_addptr = nextdsc;
-
- /*
+
+ /*
* Give the buffer to the DMA engine.
*/
-
- SBMAC_WRITECSR(d->sbdma_dscrcnt,1);
-
+
+ __raw_writeq(1, d->sbdma_dscrcnt);
+
return 0; /* we did it */
}
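
The full-ring test in sbdma_add_txbuffer() above (nextdsc == sbdma_remptr) relies on keeping one descriptor permanently unused, so that equal add/remove pointers can only mean "empty". A minimal stand-alone sketch of that convention, with hypothetical names and an illustrative ring size:

	#define RING_SIZE 64		/* illustrative, not the driver's value */

	struct ring {
		unsigned int add;	/* producer index */
		unsigned int rem;	/* consumer index */
	};

	static int ring_full(const struct ring *r)
	{
		/* advancing the producer onto the consumer would be ambiguous,
		 * so that state is declared "full" and never entered */
		return (r->add + 1) % RING_SIZE == r->rem;
	}

	static int ring_empty(const struct ring *r)
	{
		return r->add == r->rem;
	}
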
@@ -1091,12 +1067,12 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
/**********************************************************************
* SBDMA_EMPTYRING(d)
- *
+ *
* Free all allocated sk_buffs on the specified DMA channel;
- *
- * Input parameters:
+ *
+ * Input parameters:
* d - DMA channel
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -1105,7 +1081,7 @@ static void sbdma_emptyring(sbmacdma_t *d)
{
int idx;
struct sk_buff *sb;
-
+
for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
sb = d->sbdma_ctxtable[idx];
if (sb) {
@@ -1118,13 +1094,13 @@ static void sbdma_emptyring(sbmacdma_t *d)
/**********************************************************************
* SBDMA_FILLRING(d)
- *
+ *
* Fill the specified DMA channel (must be receive channel)
* with sk_buffs
- *
- * Input parameters:
+ *
+ * Input parameters:
* d - DMA channel
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -1132,7 +1108,7 @@ static void sbdma_emptyring(sbmacdma_t *d)
static void sbdma_fillring(sbmacdma_t *d)
{
int idx;
-
+
for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
if (sbdma_add_rcvbuffer(d,NULL) != 0)
break;
@@ -1142,16 +1118,16 @@ static void sbdma_fillring(sbmacdma_t *d)
/**********************************************************************
* SBDMA_RX_PROCESS(sc,d)
- *
- * Process "completed" receive buffers on the specified DMA channel.
+ *
+ * Process "completed" receive buffers on the specified DMA channel.
* Note that this isn't really ideal for priority channels, since
- * it processes all of the packets on a given channel before
- * returning.
+ * it processes all of the packets on a given channel before
+ * returning.
*
- * Input parameters:
+ * Input parameters:
* sc - softc structure
* d - DMA channel context
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -1163,56 +1139,56 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
sbdmadscr_t *dsc;
struct sk_buff *sb;
int len;
-
+
for (;;) {
- /*
+ /*
* figure out where we are (as an index) and where
* the hardware is (also as an index)
*
- * This could be done faster if (for example) the
+ * This could be done faster if (for example) the
* descriptor table was page-aligned and contiguous in
* both virtual and physical memory -- you could then
* just compare the low-order bits of the virtual address
* (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
*/
-
+
curidx = d->sbdma_remptr - d->sbdma_dscrtable;
- hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+ hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
-
+
/*
* If they're the same, that means we've processed all
* of the descriptors up to (but not including) the one that
* the hardware is working on right now.
*/
-
+
if (curidx == hwidx)
break;
-
+
/*
* Otherwise, get the packet's sk_buff ptr back
*/
-
+
dsc = &(d->sbdma_dscrtable[curidx]);
sb = d->sbdma_ctxtable[curidx];
d->sbdma_ctxtable[curidx] = NULL;
-
+
len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
-
+
/*
* Check packet status. If good, process it.
* If not, silently drop it and put it back on the
* receive ring.
*/
-
+
if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) {
-
+
/*
* Add a new buffer to replace the old one. If we fail
* to allocate a buffer, we're going to drop this
* packet and put it right back on the receive ring.
*/
-
+
if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) {
sc->sbm_stats.rx_dropped++;
sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
@@ -1221,7 +1197,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
* Set length into the packet
*/
skb_put(sb,len);
-
+
/*
* Buffer has been replaced on the
* receive ring. Pass the buffer to
@@ -1240,7 +1216,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
sb->ip_summed = CHECKSUM_NONE;
}
}
-
+
netif_rx(sb);
}
} else {
@@ -1251,14 +1227,14 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
sc->sbm_stats.rx_errors++;
sbdma_add_rcvbuffer(d,sb);
}
-
-
- /*
+
+
+ /*
* .. and advance to the next buffer.
*/
-
+
d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
-
+
}
}
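
The curidx/hwidx computation above recovers a ring index from the physical address the DMA engine reports: subtract the descriptor table's physical base and divide by the descriptor size. A hypothetical stand-alone sketch of the same arithmetic:

	#include <stdint.h>

	/* stand-in for the driver's two-word descriptor */
	struct dscr {
		uint64_t dscr_a;
		uint64_t dscr_b;
	};

	static unsigned int phys_to_index(uint64_t cur_phys, uint64_t table_phys)
	{
		/* descriptors are contiguous, so the byte offset divided by the
		 * element size is the slot the hardware is currently using */
		return (unsigned int)((cur_phys - table_phys) / sizeof(struct dscr));
	}
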
@@ -1266,17 +1242,17 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
/**********************************************************************
* SBDMA_TX_PROCESS(sc,d)
- *
- * Process "completed" transmit buffers on the specified DMA channel.
+ *
+ * Process "completed" transmit buffers on the specified DMA channel.
* This is normally called within the interrupt service routine.
* Note that this isn't really ideal for priority channels, since
- * it processes all of the packets on a given channel before
- * returning.
+ * it processes all of the packets on a given channel before
+ * returning.
*
- * Input parameters:
+ * Input parameters:
* sc - softc structure
* d - DMA channel context
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -1290,21 +1266,21 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
unsigned long flags;
spin_lock_irqsave(&(sc->sbm_lock), flags);
-
+
for (;;) {
- /*
+ /*
* figure out where we are (as an index) and where
* the hardware is (also as an index)
*
- * This could be done faster if (for example) the
+ * This could be done faster if (for example) the
* descriptor table was page-aligned and contiguous in
* both virtual and physical memory -- you could then
* just compare the low-order bits of the virtual address
* (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
*/
-
+
curidx = d->sbdma_remptr - d->sbdma_dscrtable;
- hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+ hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
/*
@@ -1312,75 +1288,75 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
* of the descriptors up to (but not including) the one that
* the hardware is working on right now.
*/
-
+
if (curidx == hwidx)
break;
-
+
/*
* Otherwise, get the packet's sk_buff ptr back
*/
-
+
dsc = &(d->sbdma_dscrtable[curidx]);
sb = d->sbdma_ctxtable[curidx];
d->sbdma_ctxtable[curidx] = NULL;
-
+
/*
* Stats
*/
-
+
sc->sbm_stats.tx_bytes += sb->len;
sc->sbm_stats.tx_packets++;
-
+
/*
* for transmits, we just free buffers.
*/
-
+
dev_kfree_skb_irq(sb);
-
- /*
+
+ /*
* .. and advance to the next buffer.
*/
d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
-
+
}
-
+
/*
* Decide if we should wake up the protocol or not.
* Other drivers seem to do this when we reach a low
* watermark on the transmit queue.
*/
-
+
netif_wake_queue(d->sbdma_eth->sbm_dev);
-
+
spin_unlock_irqrestore(&(sc->sbm_lock), flags);
-
+
}
/**********************************************************************
* SBMAC_INITCTX(s)
- *
+ *
* Initialize an Ethernet context structure - this is called
* once per MAC on the 1250. Memory is allocated here, so don't
* call it again from inside the ioctl routines that bring the
* interface up/down
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac context structure
- *
+ *
* Return value:
* 0
********************************************************************* */
static int sbmac_initctx(struct sbmac_softc *s)
{
-
- /*
- * figure out the addresses of some ports
+
+ /*
+ * figure out the addresses of some ports
*/
-
+
s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
s->sbm_maccfg = s->sbm_base + R_MAC_CFG;
s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG;
@@ -1397,29 +1373,29 @@ static int sbmac_initctx(struct sbmac_softc *s)
s->sbm_phy_oldanlpar = 0;
s->sbm_phy_oldk1stsr = 0;
s->sbm_phy_oldlinkstat = 0;
-
+
/*
* Initialize the DMA channels. Right now, only one per MAC is used
* Note: Only do this _once_, as it allocates memory from the kernel!
*/
-
+
sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
-
+
/*
* initial state is OFF
*/
-
+
s->sbm_state = sbmac_state_off;
-
+
/*
* Initial speed is (XXX TEMP) 10MBit/s HDX no FC
*/
-
+
s->sbm_speed = sbmac_speed_10;
s->sbm_duplex = sbmac_duplex_half;
s->sbm_fc = sbmac_fc_disabled;
-
+
return 0;
}
@@ -1430,7 +1406,7 @@ static void sbdma_uninitctx(struct sbmacdma_s *d)
kfree(d->sbdma_dscrtable);
d->sbdma_dscrtable = NULL;
}
-
+
if (d->sbdma_ctxtable) {
kfree(d->sbdma_ctxtable);
d->sbdma_ctxtable = NULL;
@@ -1447,12 +1423,12 @@ static void sbmac_uninitctx(struct sbmac_softc *sc)
/**********************************************************************
* SBMAC_CHANNEL_START(s)
- *
+ *
* Start packet processing on this MAC.
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac structure
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -1460,49 +1436,49 @@ static void sbmac_uninitctx(struct sbmac_softc *sc)
static void sbmac_channel_start(struct sbmac_softc *s)
{
uint64_t reg;
- sbmac_port_t port;
+ volatile void __iomem *port;
uint64_t cfg,fifo,framecfg;
int idx, th_value;
-
+
/*
* Don't do this if running
*/
if (s->sbm_state == sbmac_state_on)
return;
-
+
/*
* Bring the controller out of reset, but leave it off.
*/
-
- SBMAC_WRITECSR(s->sbm_macenable,0);
-
+
+ __raw_writeq(0, s->sbm_macenable);
+
/*
* Ignore all received packets
*/
-
- SBMAC_WRITECSR(s->sbm_rxfilter,0);
-
- /*
+
+ __raw_writeq(0, s->sbm_rxfilter);
+
+ /*
* Calculate values for various control registers.
*/
-
+
cfg = M_MAC_RETRY_EN |
- M_MAC_TX_HOLD_SOP_EN |
+ M_MAC_TX_HOLD_SOP_EN |
V_MAC_TX_PAUSE_CNT_16K |
M_MAC_AP_STAT_EN |
M_MAC_FAST_SYNC |
M_MAC_SS_EN |
0;
-
- /*
+
+ /*
	 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 parts
* and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
* Use a larger RD_THRSH for gigabit
*/
- if (periph_rev >= 2)
+ if (periph_rev >= 2)
th_value = 64;
- else
+ else
th_value = 28;
fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
@@ -1520,51 +1496,51 @@ static void sbmac_channel_start(struct sbmac_softc *s)
V_MAC_BACKOFF_SEL(1);
/*
- * Clear out the hash address map
+ * Clear out the hash address map
*/
-
+
port = s->sbm_base + R_MAC_HASH_BASE;
for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
- SBMAC_WRITECSR(port,0);
+ __raw_writeq(0, port);
port += sizeof(uint64_t);
}
-
+
/*
* Clear out the exact-match table
*/
-
+
port = s->sbm_base + R_MAC_ADDR_BASE;
for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
- SBMAC_WRITECSR(port,0);
+ __raw_writeq(0, port);
port += sizeof(uint64_t);
}
-
+
/*
* Clear out the DMA Channel mapping table registers
*/
-
+
port = s->sbm_base + R_MAC_CHUP0_BASE;
for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
- SBMAC_WRITECSR(port,0);
+ __raw_writeq(0, port);
port += sizeof(uint64_t);
}
port = s->sbm_base + R_MAC_CHLO0_BASE;
for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
- SBMAC_WRITECSR(port,0);
+ __raw_writeq(0, port);
port += sizeof(uint64_t);
}
-
+
/*
* Program the hardware address. It goes into the hardware-address
* register as well as the first filter register.
*/
-
+
reg = sbmac_addr2reg(s->sbm_hwaddr);
-
+
port = s->sbm_base + R_MAC_ADDR_BASE;
- SBMAC_WRITECSR(port,reg);
+ __raw_writeq(reg, port);
port = s->sbm_base + R_MAC_ETHERNET_ADDR;
#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
@@ -1573,108 +1549,105 @@ static void sbmac_channel_start(struct sbmac_softc *s)
* destination address in the R_MAC_ETHERNET_ADDR register.
* Set the value to zero.
*/
- SBMAC_WRITECSR(port,0);
+ __raw_writeq(0, port);
#else
- SBMAC_WRITECSR(port,reg);
+ __raw_writeq(reg, port);
#endif
-
+
/*
* Set the receive filter for no packets, and write values
* to the various config registers
*/
-
- SBMAC_WRITECSR(s->sbm_rxfilter,0);
- SBMAC_WRITECSR(s->sbm_imr,0);
- SBMAC_WRITECSR(s->sbm_framecfg,framecfg);
- SBMAC_WRITECSR(s->sbm_fifocfg,fifo);
- SBMAC_WRITECSR(s->sbm_maccfg,cfg);
-
+
+ __raw_writeq(0, s->sbm_rxfilter);
+ __raw_writeq(0, s->sbm_imr);
+ __raw_writeq(framecfg, s->sbm_framecfg);
+ __raw_writeq(fifo, s->sbm_fifocfg);
+ __raw_writeq(cfg, s->sbm_maccfg);
+
/*
* Initialize DMA channels (rings should be ok now)
*/
-
+
sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
-
+
/*
* Configure the speed, duplex, and flow control
*/
sbmac_set_speed(s,s->sbm_speed);
sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
-
+
/*
* Fill the receive ring
*/
-
+
sbdma_fillring(&(s->sbm_rxdma));
-
- /*
+
+ /*
* Turn on the rest of the bits in the enable register
- */
-
- SBMAC_WRITECSR(s->sbm_macenable,
- M_MAC_RXDMA_EN0 |
+ */
+
+ __raw_writeq(M_MAC_RXDMA_EN0 |
M_MAC_TXDMA_EN0 |
M_MAC_RX_ENABLE |
- M_MAC_TX_ENABLE);
-
-
+ M_MAC_TX_ENABLE, s->sbm_macenable);
+
+
#ifdef CONFIG_SBMAC_COALESCE
/*
* Accept any TX interrupt and EOP count/timer RX interrupts on ch 0
*/
- SBMAC_WRITECSR(s->sbm_imr,
- ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
- ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0));
+ __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+ ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
#else
/*
* Accept any kind of interrupt on TX and RX DMA channel 0
*/
- SBMAC_WRITECSR(s->sbm_imr,
- (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
- (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
+ __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+ (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
#endif
-
- /*
- * Enable receiving unicasts and broadcasts
+
+ /*
+ * Enable receiving unicasts and broadcasts
*/
-
- SBMAC_WRITECSR(s->sbm_rxfilter,M_MAC_UCAST_EN | M_MAC_BCAST_EN);
-
+
+ __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
+
/*
- * we're running now.
+ * we're running now.
*/
-
+
s->sbm_state = sbmac_state_on;
-
- /*
- * Program multicast addresses
+
+ /*
+ * Program multicast addresses
*/
-
+
sbmac_setmulti(s);
-
- /*
- * If channel was in promiscuous mode before, turn that on
+
+ /*
+ * If channel was in promiscuous mode before, turn that on
*/
-
+
if (s->sbm_devflags & IFF_PROMISC) {
sbmac_promiscuous_mode(s,1);
}
-
+
}
/**********************************************************************
* SBMAC_CHANNEL_STOP(s)
- *
+ *
* Stop packet processing on this MAC.
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac structure
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -1682,49 +1655,49 @@ static void sbmac_channel_start(struct sbmac_softc *s)
static void sbmac_channel_stop(struct sbmac_softc *s)
{
/* don't do this if already stopped */
-
+
if (s->sbm_state == sbmac_state_off)
return;
-
+
/* don't accept any packets, disable all interrupts */
-
- SBMAC_WRITECSR(s->sbm_rxfilter,0);
- SBMAC_WRITECSR(s->sbm_imr,0);
-
+
+ __raw_writeq(0, s->sbm_rxfilter);
+ __raw_writeq(0, s->sbm_imr);
+
/* Turn off ticker */
-
+
/* XXX */
-
+
/* turn off receiver and transmitter */
-
- SBMAC_WRITECSR(s->sbm_macenable,0);
-
+
+ __raw_writeq(0, s->sbm_macenable);
+
/* We're stopped now. */
-
+
s->sbm_state = sbmac_state_off;
-
+
/*
* Stop DMA channels (rings should be ok now)
*/
-
+
sbdma_channel_stop(&(s->sbm_rxdma));
sbdma_channel_stop(&(s->sbm_txdma));
-
+
/* Empty the receive and transmit rings */
-
+
sbdma_emptyring(&(s->sbm_rxdma));
sbdma_emptyring(&(s->sbm_txdma));
-
+
}
/**********************************************************************
* SBMAC_SET_CHANNEL_STATE(state)
- *
+ *
* Set the channel's state ON or OFF
- *
- * Input parameters:
+ *
+ * Input parameters:
* state - new state
- *
+ *
* Return value:
* old state
********************************************************************* */
@@ -1732,43 +1705,43 @@ static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
sbmac_state_t state)
{
sbmac_state_t oldstate = sc->sbm_state;
-
+
/*
* If same as previous state, return
*/
-
+
if (state == oldstate) {
return oldstate;
}
-
+
/*
- * If new state is ON, turn channel on
+ * If new state is ON, turn channel on
*/
-
+
if (state == sbmac_state_on) {
sbmac_channel_start(sc);
}
else {
sbmac_channel_stop(sc);
}
-
+
/*
* Return previous state
*/
-
+
return oldstate;
}
/**********************************************************************
* SBMAC_PROMISCUOUS_MODE(sc,onoff)
- *
+ *
* Turn on or off promiscuous mode
- *
- * Input parameters:
+ *
+ * Input parameters:
* sc - softc
* onoff - 1 to turn on, 0 to turn off
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -1776,30 +1749,30 @@ static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
{
uint64_t reg;
-
+
if (sc->sbm_state != sbmac_state_on)
return;
-
+
if (onoff) {
- reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg = __raw_readq(sc->sbm_rxfilter);
reg |= M_MAC_ALLPKT_EN;
- SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
- }
+ __raw_writeq(reg, sc->sbm_rxfilter);
+ }
else {
- reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg = __raw_readq(sc->sbm_rxfilter);
reg &= ~M_MAC_ALLPKT_EN;
- SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+ __raw_writeq(reg, sc->sbm_rxfilter);
}
}
/**********************************************************************
* SBMAC_SETIPHDR_OFFSET(sc,onoff)
- *
+ *
* Set the iphdr offset as 15 assuming ethernet encapsulation
- *
- * Input parameters:
+ *
+ * Input parameters:
* sc - softc
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -1807,12 +1780,12 @@ static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
{
uint64_t reg;
-
+
	/* Hard code the offset to 15 for now */
- reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg = __raw_readq(sc->sbm_rxfilter);
reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
- SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
-
+ __raw_writeq(reg, sc->sbm_rxfilter);
+
/* read system identification to determine revision */
if (periph_rev >= 2) {
sc->rx_hw_checksum = ENABLE;
@@ -1824,13 +1797,13 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
/**********************************************************************
* SBMAC_ADDR2REG(ptr)
- *
+ *
* Convert six bytes into the 64-bit register value that
* we typically write into the SBMAC's address/mcast registers
- *
- * Input parameters:
+ *
+ * Input parameters:
* ptr - pointer to 6 bytes
- *
+ *
* Return value:
* register value
********************************************************************* */
@@ -1838,35 +1811,35 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
static uint64_t sbmac_addr2reg(unsigned char *ptr)
{
uint64_t reg = 0;
-
+
ptr += 6;
-
- reg |= (uint64_t) *(--ptr);
+
+ reg |= (uint64_t) *(--ptr);
reg <<= 8;
- reg |= (uint64_t) *(--ptr);
+ reg |= (uint64_t) *(--ptr);
reg <<= 8;
- reg |= (uint64_t) *(--ptr);
+ reg |= (uint64_t) *(--ptr);
reg <<= 8;
- reg |= (uint64_t) *(--ptr);
+ reg |= (uint64_t) *(--ptr);
reg <<= 8;
- reg |= (uint64_t) *(--ptr);
+ reg |= (uint64_t) *(--ptr);
reg <<= 8;
- reg |= (uint64_t) *(--ptr);
-
+ reg |= (uint64_t) *(--ptr);
+
return reg;
}
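
The byte-reversal in sbmac_addr2reg() above packs the six address bytes so that byte 0 ends up in the least-significant byte of the register value. An equivalent one-expression sketch (hypothetical helper, not part of the patch):

	#include <stdint.h>

	static uint64_t mac_to_u64(const unsigned char *p)
	{
		return  (uint64_t)p[0]        | ((uint64_t)p[1] << 8)  |
		       ((uint64_t)p[2] << 16) | ((uint64_t)p[3] << 24) |
		       ((uint64_t)p[4] << 32) | ((uint64_t)p[5] << 40);
	}
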
/**********************************************************************
* SBMAC_SET_SPEED(s,speed)
- *
+ *
* Configure LAN speed for the specified MAC.
* Warning: must be called when MAC is off!
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac structure
* speed - speed to set MAC to (see sbmac_speed_t enum)
- *
+ *
* Return value:
* 1 if successful
* 0 indicates invalid parameters
@@ -1880,31 +1853,31 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
/*
* Save new current values
*/
-
+
s->sbm_speed = speed;
-
+
if (s->sbm_state == sbmac_state_on)
return 0; /* save for next restart */
/*
- * Read current register values
+ * Read current register values
*/
-
- cfg = SBMAC_READCSR(s->sbm_maccfg);
- framecfg = SBMAC_READCSR(s->sbm_framecfg);
-
+
+ cfg = __raw_readq(s->sbm_maccfg);
+ framecfg = __raw_readq(s->sbm_framecfg);
+
/*
* Mask out the stuff we want to change
*/
-
+
cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
M_MAC_SLOT_SIZE);
-
+
/*
* Now add in the new bits
*/
-
+
switch (speed) {
case sbmac_speed_10:
framecfg |= V_MAC_IFG_RX_10 |
@@ -1913,7 +1886,7 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
V_MAC_SLOT_SIZE_10;
cfg |= V_MAC_SPEED_SEL_10MBPS;
break;
-
+
case sbmac_speed_100:
framecfg |= V_MAC_IFG_RX_100 |
V_MAC_IFG_TX_100 |
@@ -1921,7 +1894,7 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
V_MAC_SLOT_SIZE_100;
cfg |= V_MAC_SPEED_SEL_100MBPS ;
break;
-
+
case sbmac_speed_1000:
framecfg |= V_MAC_IFG_RX_1000 |
V_MAC_IFG_TX_1000 |
@@ -1929,34 +1902,34 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
V_MAC_SLOT_SIZE_1000;
cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
break;
-
+
case sbmac_speed_auto: /* XXX not implemented */
/* fall through */
default:
return 0;
}
-
+
/*
- * Send the bits back to the hardware
+ * Send the bits back to the hardware
*/
-
- SBMAC_WRITECSR(s->sbm_framecfg,framecfg);
- SBMAC_WRITECSR(s->sbm_maccfg,cfg);
-
+
+ __raw_writeq(framecfg, s->sbm_framecfg);
+ __raw_writeq(cfg, s->sbm_maccfg);
+
return 1;
}
/**********************************************************************
* SBMAC_SET_DUPLEX(s,duplex,fc)
- *
+ *
* Set Ethernet duplex and flow control options for this MAC
* Warning: must be called when MAC is off!
- *
- * Input parameters:
+ *
+ * Input parameters:
* s - sbmac structure
* duplex - duplex setting (see sbmac_duplex_t)
* fc - flow control setting (see sbmac_fc_t)
- *
+ *
* Return value:
* 1 if ok
* 0 if an invalid parameter combination was specified
@@ -1965,67 +1938,67 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc)
{
uint64_t cfg;
-
+
/*
* Save new current values
*/
-
+
s->sbm_duplex = duplex;
s->sbm_fc = fc;
-
+
if (s->sbm_state == sbmac_state_on)
return 0; /* save for next restart */
-
+
/*
- * Read current register values
+ * Read current register values
*/
-
- cfg = SBMAC_READCSR(s->sbm_maccfg);
-
+
+ cfg = __raw_readq(s->sbm_maccfg);
+
/*
* Mask off the stuff we're about to change
*/
-
+
cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
-
-
+
+
switch (duplex) {
case sbmac_duplex_half:
switch (fc) {
case sbmac_fc_disabled:
cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
break;
-
+
case sbmac_fc_collision:
cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
break;
-
+
case sbmac_fc_carrier:
cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
break;
-
+
case sbmac_fc_auto: /* XXX not implemented */
- /* fall through */
+ /* fall through */
case sbmac_fc_frame: /* not valid in half duplex */
default: /* invalid selection */
return 0;
}
break;
-
+
case sbmac_duplex_full:
switch (fc) {
case sbmac_fc_disabled:
cfg |= V_MAC_FC_CMD_DISABLED;
break;
-
+
case sbmac_fc_frame:
cfg |= V_MAC_FC_CMD_ENABLED;
break;
-
+
case sbmac_fc_collision: /* not valid in full duplex */
case sbmac_fc_carrier: /* not valid in full duplex */
case sbmac_fc_auto: /* XXX not implemented */
- /* fall through */
+ /* fall through */
default:
return 0;
}
@@ -2034,13 +2007,13 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc
/* XXX not implemented */
break;
}
-
+
/*
- * Send the bits back to the hardware
+ * Send the bits back to the hardware
*/
-
- SBMAC_WRITECSR(s->sbm_maccfg,cfg);
-
+
+ __raw_writeq(cfg, s->sbm_maccfg);
+
return 1;
}
@@ -2049,12 +2022,12 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc
/**********************************************************************
* SBMAC_INTR()
- *
+ *
* Interrupt handler for MAC interrupts
- *
- * Input parameters:
+ *
+ * Input parameters:
* MAC structure
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -2066,27 +2039,27 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
int handled = 0;
for (;;) {
-
+
/*
* Read the ISR (this clears the bits in the real
* register, except for counter addr)
*/
-
- isr = SBMAC_READCSR(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
-
+
+ isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
+
if (isr == 0)
break;
handled = 1;
-
+
/*
* Transmits on channel 0
*/
-
+
if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
sbdma_tx_process(sc,&(sc->sbm_txdma));
}
-
+
/*
* Receives on channel 0
*/
@@ -2106,8 +2079,8 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
* EOP_SEEN here takes care of this case.
* (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0)
*/
-
-
+
+
if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
sbdma_rx_process(sc,&(sc->sbm_rxdma));
}
@@ -2118,29 +2091,29 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
/**********************************************************************
* SBMAC_START_TX(skb,dev)
- *
- * Start output on the specified interface. Basically, we
+ *
+ * Start output on the specified interface. Basically, we
* queue as many buffers as we can until the ring fills up, or
* we run off the end of the queue, whichever comes first.
- *
- * Input parameters:
- *
- *
+ *
+ * Input parameters:
+ *
+ *
* Return value:
* nothing
********************************************************************* */
static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct sbmac_softc *sc = netdev_priv(dev);
-
+
/* lock eth irq */
spin_lock_irq (&sc->sbm_lock);
-
+
/*
- * Put the buffer on the transmit ring. If we
+ * Put the buffer on the transmit ring. If we
* don't have room, stop the queue.
*/
-
+
if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
/* XXX save skb that we could not send */
netif_stop_queue(dev);
@@ -2148,24 +2121,24 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
return 1;
}
-
+
dev->trans_start = jiffies;
-
+
spin_unlock_irq (&sc->sbm_lock);
-
+
return 0;
}
/**********************************************************************
* SBMAC_SETMULTI(sc)
- *
+ *
* Reprogram the multicast table into the hardware, given
* the list of multicasts associated with the interface
* structure.
- *
- * Input parameters:
+ *
+ * Input parameters:
* sc - softc
- *
+ *
* Return value:
* nothing
********************************************************************* */
@@ -2173,75 +2146,75 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
static void sbmac_setmulti(struct sbmac_softc *sc)
{
uint64_t reg;
- sbmac_port_t port;
+ volatile void __iomem *port;
int idx;
struct dev_mc_list *mclist;
struct net_device *dev = sc->sbm_dev;
-
- /*
+
+ /*
* Clear out entire multicast table. We do this by nuking
* the entire hash table and all the direct matches except
- * the first one, which is used for our station address
+ * the first one, which is used for our station address
*/
-
+
for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
- SBMAC_WRITECSR(port,0);
+ __raw_writeq(0, port);
}
-
+
for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
- SBMAC_WRITECSR(port,0);
+ __raw_writeq(0, port);
}
-
+
/*
* Clear the filter to say we don't want any multicasts.
*/
-
- reg = SBMAC_READCSR(sc->sbm_rxfilter);
+
+ reg = __raw_readq(sc->sbm_rxfilter);
reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
- SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
-
+ __raw_writeq(reg, sc->sbm_rxfilter);
+
if (dev->flags & IFF_ALLMULTI) {
- /*
- * Enable ALL multicasts. Do this by inverting the
- * multicast enable bit.
+ /*
+ * Enable ALL multicasts. Do this by inverting the
+ * multicast enable bit.
*/
- reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg = __raw_readq(sc->sbm_rxfilter);
reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
- SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+ __raw_writeq(reg, sc->sbm_rxfilter);
return;
}
-
- /*
+
+ /*
	 * Program new multicast entries. For now, only use the
* perfect filter. In the future we'll need to use the
* hash filter if the perfect filter overflows
*/
-
+
/* XXX only using perfect filter for now, need to use hash
* XXX if the table overflows */
-
+
idx = 1; /* skip station address */
mclist = dev->mc_list;
while (mclist && (idx < MAC_ADDR_COUNT)) {
reg = sbmac_addr2reg(mclist->dmi_addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
- SBMAC_WRITECSR(port,reg);
+ __raw_writeq(reg, port);
idx++;
mclist = mclist->next;
}
-
- /*
+
+ /*
* Enable the "accept multicast bits" if we programmed at least one
- * multicast.
+ * multicast.
*/
-
+
if (idx > 1) {
- reg = SBMAC_READCSR(sc->sbm_rxfilter);
+ reg = __raw_readq(sc->sbm_rxfilter);
reg |= M_MAC_MCAST_EN;
- SBMAC_WRITECSR(sc->sbm_rxfilter,reg);
+ __raw_writeq(reg, sc->sbm_rxfilter);
}
}
@@ -2250,12 +2223,12 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR)
/**********************************************************************
* SBMAC_PARSE_XDIGIT(str)
- *
+ *
* Parse a hex digit, returning its value
- *
- * Input parameters:
+ *
+ * Input parameters:
* str - character
- *
+ *
* Return value:
* hex value, or -1 if invalid
********************************************************************* */
@@ -2263,7 +2236,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
static int sbmac_parse_xdigit(char str)
{
int digit;
-
+
if ((str >= '0') && (str <= '9'))
digit = str - '0';
else if ((str >= 'a') && (str <= 'f'))
@@ -2272,20 +2245,20 @@ static int sbmac_parse_xdigit(char str)
digit = str - 'A' + 10;
else
return -1;
-
+
return digit;
}
/**********************************************************************
* SBMAC_PARSE_HWADDR(str,hwaddr)
- *
+ *
* Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
* Ethernet address.
- *
- * Input parameters:
+ *
+ * Input parameters:
* str - string
* hwaddr - pointer to hardware address
- *
+ *
* Return value:
* 0 if ok, else -1
********************************************************************* */
@@ -2294,7 +2267,7 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
{
int digit1,digit2;
int idx = 6;
-
+
while (*str && (idx > 0)) {
digit1 = sbmac_parse_xdigit(*str);
if (digit1 < 0)
@@ -2302,7 +2275,7 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
str++;
if (!*str)
return -1;
-
+
if ((*str == ':') || (*str == '-')) {
digit2 = digit1;
digit1 = 0;
@@ -2313,10 +2286,10 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
return -1;
str++;
}
-
+
*hwaddr++ = (digit1 << 4) | digit2;
idx--;
-
+
if (*str == '-')
str++;
if (*str == ':')
@@ -2337,12 +2310,12 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
/**********************************************************************
* SBMAC_INIT(dev)
- *
+ *
* Attach routine - init hardware and hook ourselves into linux
- *
- * Input parameters:
+ *
+ * Input parameters:
* dev - net_device structure
- *
+ *
* Return value:
* status
********************************************************************* */
@@ -2354,53 +2327,53 @@ static int sbmac_init(struct net_device *dev, int idx)
uint64_t ea_reg;
int i;
int err;
-
+
sc = netdev_priv(dev);
-
+
/* Determine controller base address */
-
+
sc->sbm_base = IOADDR(dev->base_addr);
sc->sbm_dev = dev;
sc->sbe_idx = idx;
-
+
eaddr = sc->sbm_hwaddr;
-
- /*
+
+ /*
	 * Read the ethernet address. The firmware left this programmed
* for us in the ethernet address register for each mac.
*/
-
- ea_reg = SBMAC_READCSR(sc->sbm_base + R_MAC_ETHERNET_ADDR);
- SBMAC_WRITECSR(sc->sbm_base + R_MAC_ETHERNET_ADDR, 0);
+
+ ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
+ __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
for (i = 0; i < 6; i++) {
eaddr[i] = (uint8_t) (ea_reg & 0xFF);
ea_reg >>= 8;
}
-
+
for (i = 0; i < 6; i++) {
dev->dev_addr[i] = eaddr[i];
}
-
-
+
+
/*
- * Init packet size
+ * Init packet size
*/
-
+
sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
- /*
+ /*
* Initialize context (get pointers to registers and stuff), then
* allocate the memory for the descriptor tables.
*/
-
+
sbmac_initctx(sc);
-
+
/*
* Set up Linux device callins
*/
-
+
spin_lock_init(&(sc->sbm_lock));
-
+
dev->open = sbmac_open;
dev->hard_start_xmit = sbmac_start_tx;
dev->stop = sbmac_close;
@@ -2419,7 +2392,7 @@ static int sbmac_init(struct net_device *dev, int idx)
if (err)
goto out_uninit;
- if (periph_rev >= 2) {
+ if (sc->rx_hw_checksum == ENABLE) {
printk(KERN_INFO "%s: enabling TCP rcv checksum\n",
sc->sbm_dev->name);
}
@@ -2430,10 +2403,10 @@ static int sbmac_init(struct net_device *dev, int idx)
* was being displayed)
*/
printk(KERN_INFO
- "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
dev->name, dev->base_addr,
eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]);
-
+
return 0;
@@ -2447,54 +2420,86 @@ out_uninit:
static int sbmac_open(struct net_device *dev)
{
struct sbmac_softc *sc = netdev_priv(dev);
-
+
if (debug > 1) {
printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
}
-
- /*
+
+ /*
* map/route interrupt (clear status first, in case something
* weird is pending; we haven't initialized the mac registers
* yet)
*/
- SBMAC_READCSR(sc->sbm_isr);
+ __raw_readq(sc->sbm_isr);
if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev))
return -EBUSY;
/*
- * Configure default speed
+ * Probe phy address
+ */
+
+ if(sbmac_mii_probe(dev) == -1) {
+ printk("%s: failed to probe PHY.\n", dev->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Configure default speed
*/
sbmac_mii_poll(sc,noisy_mii);
-
+
/*
* Turn on the channel
*/
sbmac_set_channel_state(sc,sbmac_state_on);
-
+
/*
* XXX Station address is in dev->dev_addr
*/
-
+
if (dev->if_port == 0)
- dev->if_port = 0;
-
+ dev->if_port = 0;
+
netif_start_queue(dev);
-
+
sbmac_set_rx_mode(dev);
-
+
/* Set the timer to check for link beat. */
init_timer(&sc->sbm_timer);
sc->sbm_timer.expires = jiffies + 2 * HZ/100;
sc->sbm_timer.data = (unsigned long)dev;
sc->sbm_timer.function = &sbmac_timer;
add_timer(&sc->sbm_timer);
-
+
return 0;
}
+static int sbmac_mii_probe(struct net_device *dev)
+{
+ int i;
+ struct sbmac_softc *s = netdev_priv(dev);
+ u16 bmsr, id1, id2;
+ u32 vendor, device;
+
+ for (i=1; i<31; i++) {
+ bmsr = sbmac_mii_read(s, i, MII_BMSR);
+ if (bmsr != 0) {
+ s->sbm_phys[0] = i;
+ id1 = sbmac_mii_read(s, i, MII_PHYIDR1);
+ id2 = sbmac_mii_read(s, i, MII_PHYIDR2);
+ vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f);
+ device = (id2 >> 4) & 0x3f;
+
+ printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n",
+ dev->name, i, vendor, device);
+ return i;
+ }
+ }
+ return -1;
+}
static int sbmac_mii_poll(struct sbmac_softc *s,int noisy)
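
The new sbmac_mii_probe() above splits the two MII ID registers in the usual clause-22 fashion: PHYIDR1 carries the upper OUI bits, and PHYIDR2 carries the remaining OUI bits plus the model and revision fields. A hypothetical decode helper spelling out the same shifts and masks:

	#include <stdint.h>

	struct phy_id {
		uint32_t oui_bits;	/* OUI portion: id1 plus id2[15:10] */
		uint8_t  model;		/* id2[9:4] */
		uint8_t  revision;	/* id2[3:0] */
	};

	static struct phy_id decode_phy_id(uint16_t id1, uint16_t id2)
	{
		struct phy_id p;

		p.oui_bits = ((uint32_t)id1 << 6) | ((id2 >> 10) & 0x3f);
		p.model    = (id2 >> 4) & 0x3f;
		p.revision = id2 & 0x0f;
		return p;
	}
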
@@ -2609,20 +2614,20 @@ static void sbmac_timer(unsigned long data)
int mii_status;
spin_lock_irq (&sc->sbm_lock);
-
+
/* make IFF_RUNNING follow the MII status bit "Link established" */
mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR);
-
+
if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) {
sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT;
if (mii_status & BMSR_LINKSTAT) {
netif_carrier_on(dev);
}
else {
- netif_carrier_off(dev);
+ netif_carrier_off(dev);
}
}
-
+
/*
* Poll the PHY to see what speed we should be running at
*/
@@ -2640,9 +2645,9 @@ static void sbmac_timer(unsigned long data)
sbmac_channel_start(sc);
}
}
-
+
spin_unlock_irq (&sc->sbm_lock);
-
+
sc->sbm_timer.expires = jiffies + next_tick;
add_timer(&sc->sbm_timer);
}
@@ -2651,13 +2656,13 @@ static void sbmac_timer(unsigned long data)
static void sbmac_tx_timeout (struct net_device *dev)
{
struct sbmac_softc *sc = netdev_priv(dev);
-
+
spin_lock_irq (&sc->sbm_lock);
-
-
+
+
dev->trans_start = jiffies;
sc->sbm_stats.tx_errors++;
-
+
spin_unlock_irq (&sc->sbm_lock);
printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
@@ -2670,13 +2675,13 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
{
struct sbmac_softc *sc = netdev_priv(dev);
unsigned long flags;
-
+
spin_lock_irqsave(&sc->sbm_lock, flags);
-
+
/* XXX update other stats here */
-
+
spin_unlock_irqrestore(&sc->sbm_lock, flags);
-
+
return &sc->sbm_stats;
}
@@ -2693,8 +2698,8 @@ static void sbmac_set_rx_mode(struct net_device *dev)
/*
* Promiscuous changed.
*/
-
- if (dev->flags & IFF_PROMISC) {
+
+ if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
msg_flag = 1;
sbmac_promiscuous_mode(sc,1);
@@ -2705,18 +2710,18 @@ static void sbmac_set_rx_mode(struct net_device *dev)
}
}
spin_unlock_irqrestore(&sc->sbm_lock, flags);
-
+
if (msg_flag) {
printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n",
dev->name,(msg_flag==1)?"en":"dis");
}
-
+
/*
* Program the multicasts. Do this every time.
*/
-
+
sbmac_setmulti(sc);
-
+
}
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2725,10 +2730,10 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
u16 *data = (u16 *)&rq->ifr_ifru;
unsigned long flags;
int retval;
-
+
spin_lock_irqsave(&sc->sbm_lock, flags);
retval = 0;
-
+
switch(cmd) {
case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
data[0] = sc->sbm_phys[0] & 0x1f;
@@ -2750,7 +2755,7 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
default:
retval = -EOPNOTSUPP;
}
-
+
spin_unlock_irqrestore(&sc->sbm_lock, flags);
return retval;
}
@@ -2781,7 +2786,7 @@ static int sbmac_close(struct net_device *dev)
sbdma_emptyring(&(sc->sbm_txdma));
sbdma_emptyring(&(sc->sbm_rxdma));
-
+
return 0;
}
@@ -2793,13 +2798,13 @@ sbmac_setup_hwaddr(int chan,char *addr)
{
uint8_t eaddr[6];
uint64_t val;
- sbmac_port_t port;
+ unsigned long port;
port = A_MAC_CHANNEL_BASE(chan);
sbmac_parse_hwaddr(addr,eaddr);
val = sbmac_addr2reg(eaddr);
- SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR),val);
- val = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR));
+ __raw_writeq(val, IOADDR(port+R_MAC_ETHERNET_ADDR));
+ val = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
}
#endif
@@ -2810,9 +2815,9 @@ sbmac_init_module(void)
{
int idx;
struct net_device *dev;
- sbmac_port_t port;
+ unsigned long port;
int chip_max_units;
-
+
/*
* For bringup when not using the firmware, we can pre-fill
* the MAC addresses using the environment variables
@@ -2858,13 +2863,13 @@ sbmac_init_module(void)
port = A_MAC_CHANNEL_BASE(idx);
- /*
+ /*
* The R_MAC_ETHERNET_ADDR register will be set to some nonzero
* value for us by the firmware if we're going to use this MAC.
* If we find a zero, skip this MAC.
*/
- sbmac_orig_hwaddr[idx] = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR));
+ sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
if (sbmac_orig_hwaddr[idx] == 0) {
printk(KERN_DEBUG "sbmac: not configuring MAC at "
"%lx\n", port);
@@ -2876,7 +2881,7 @@ sbmac_init_module(void)
*/
dev = alloc_etherdev(sizeof(struct sbmac_softc));
- if (!dev)
+ if (!dev)
return -ENOMEM; /* return ENOMEM */
printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
@@ -2886,8 +2891,7 @@ sbmac_init_module(void)
dev->mem_end = 0;
if (sbmac_init(dev, idx)) {
port = A_MAC_CHANNEL_BASE(idx);
- SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR),
- sbmac_orig_hwaddr[idx]);
+ __raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR));
free_netdev(dev);
continue;
}
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 9bc3b1c0dd6..a4614df38a9 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -32,8 +32,6 @@
#include "sgiseeq.h"
-static char *version = "sgiseeq.c: David S. Miller (dm@engr.sgi.com)\n";
-
static char *sgiseeqstr = "SGI Seeq8003";
/*
@@ -113,9 +111,9 @@ static struct net_device *root_sgiseeq_dev;
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
- hregs->rx_reset = HPC3_ERXRST_CRESET | HPC3_ERXRST_CLRIRQ;
+ hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
udelay(20);
- hregs->rx_reset = 0;
+ hregs->reset = 0;
}
static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
@@ -252,7 +250,6 @@ void sgiseeq_dump_rings(void)
#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
-#define RDMACFG_INIT (HPC3_ERXDCFG_FRXDC | HPC3_ERXDCFG_FEOP | HPC3_ERXDCFG_FIRQ)
static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
struct sgiseeq_regs *sregs)
@@ -274,8 +271,6 @@ static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
sregs->tstat = TSTAT_INIT_SEEQ;
}
- hregs->rx_dconfig |= RDMACFG_INIT;
-
hregs->rx_ndptr = CPHYSADDR(sp->rx_desc);
hregs->tx_ndptr = CPHYSADDR(sp->tx_desc);
@@ -446,7 +441,7 @@ static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id, struct pt_regs *regs
spin_lock(&sp->tx_lock);
/* Ack the IRQ and set software state. */
- hregs->rx_reset = HPC3_ERXRST_CLRIRQ;
+ hregs->reset = HPC3_ERST_CLRIRQ;
/* Always check for received packets. */
sgiseeq_rx(dev, sp, hregs, sregs);
@@ -493,11 +488,13 @@ static int sgiseeq_close(struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct sgiseeq_regs *sregs = sp->sregs;
+ unsigned int irq = dev->irq;
netif_stop_queue(dev);
/* Shutdown the Seeq. */
reset_hpc3_and_seeq(sp->hregs, sregs);
+ free_irq(irq, dev);
return 0;
}
@@ -644,7 +641,7 @@ static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
#define ALIGNED(x) ((((unsigned long)(x)) + 0xf) & ~(0xf))
-static int sgiseeq_init(struct hpc3_regs* regs, int irq)
+static int sgiseeq_init(struct hpc3_regs* hpcregs, int irq)
{
struct sgiseeq_init_block *sr;
struct sgiseeq_private *sp;
@@ -680,8 +677,8 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
gpriv = sp;
gdev = dev;
#endif
- sp->sregs = (struct sgiseeq_regs *) &hpc3c0->eth_ext[0];
- sp->hregs = &hpc3c0->ethregs;
+ sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
+ sp->hregs = &hpcregs->ethregs;
sp->name = sgiseeqstr;
sp->mode = SEEQ_RCMD_RBCAST;
@@ -698,6 +695,11 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);
+ /* Setup PIO and DMA transfer timing */
+ sp->hregs->pconfig = 0x161;
+ sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
+ HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
+
/* Reset the chip. */
hpc3_eth_reset(sp->hregs);
@@ -724,7 +726,7 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
goto err_out_free_page;
}
- printk(KERN_INFO "%s: SGI Seeq8003 ", dev->name);
+ printk(KERN_INFO "%s: %s ", dev->name, sgiseeqstr);
for (i = 0; i < 6; i++)
printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
@@ -734,7 +736,7 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
return 0;
err_out_free_page:
- free_page((unsigned long) sp);
+ free_page((unsigned long) sp->srings);
err_out_free_dev:
kfree(dev);
@@ -744,8 +746,6 @@ err_out:
static int __init sgiseeq_probe(void)
{
- printk(version);
-
/* On board adapter on 1st HPC is always present */
return sgiseeq_init(hpc3c0, SGI_ENET_IRQ);
}
@@ -754,15 +754,12 @@ static void __exit sgiseeq_exit(void)
{
struct net_device *next, *dev;
struct sgiseeq_private *sp;
- int irq;
for (dev = root_sgiseeq_dev; dev; dev = next) {
sp = (struct sgiseeq_private *) netdev_priv(dev);
next = sp->next_module;
- irq = dev->irq;
unregister_netdev(dev);
- free_irq(irq, dev);
- free_page((unsigned long) sp);
+ free_page((unsigned long) sp->srings);
free_netdev(dev);
}
}
@@ -770,4 +767,6 @@ static void __exit sgiseeq_exit(void)
module_init(sgiseeq_probe);
module_exit(sgiseeq_exit);
+MODULE_DESCRIPTION("SGI Seeq 8003 driver");
+MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c2e6484ef13..572f121b1f4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -730,6 +730,7 @@ static struct ethtool_ops skge_ethtool_ops = {
.phys_id = skge_phys_id,
.get_stats_count = skge_get_stats_count,
.get_ethtool_stats = skge_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
/*
@@ -3096,6 +3097,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* device is off until link detection */
netif_carrier_off(dev);
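
The skge hunk above, and the sundance and via-rhine hunks further down, all wire up permanent-address reporting the same way: copy the factory MAC into dev->perm_addr as soon as it is read, and point the ethtool op at the generic helper. A minimal sketch of the pattern with hypothetical driver names:

	#include <linux/netdevice.h>
	#include <linux/ethtool.h>
	#include <linux/string.h>

	static void example_record_perm_addr(struct net_device *dev)
	{
		/* remember the factory address right after it is read from hardware */
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	}

	static struct ethtool_ops example_ethtool_ops = {
		/* generic helper that simply reports dev->perm_addr */
		.get_perm_addr = ethtool_op_get_perm_addr,
	};
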
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index f88f5e32b71..cfaf47c63c5 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -214,7 +214,8 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
{
struct bmac_init_block *bb = bp->bmac_block;
struct net_device *dev = bp->dev;
- int i, gfp_flags = GFP_KERNEL;
+ int i;
+ gfp_t gfp_flags = GFP_KERNEL;
if (from_irq || in_interrupt())
gfp_flags = GFP_ATOMIC;
diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h
index 5674003fc38..b0dbc518714 100644
--- a/drivers/net/sunbmac.h
+++ b/drivers/net/sunbmac.h
@@ -339,7 +339,7 @@ struct bigmac {
#define ALIGNED_RX_SKB_ADDR(addr) \
((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
-static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, int gfp_flags)
+static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags)
{
struct sk_buff *skb;
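
The sunbmac change above is purely a typing fix: allocation flags are carried in the dedicated gfp_t type rather than a plain int, so sparse can flag accidental mixing. A sketch of the resulting idiom (hypothetical helper):

	#include <linux/skbuff.h>
	#include <linux/gfp.h>

	static struct sk_buff *example_alloc(unsigned int len, int from_irq)
	{
		/* pick blocking vs. atomic allocation, carried as gfp_t */
		gfp_t gfp_flags = from_irq ? GFP_ATOMIC : GFP_KERNEL;

		return alloc_skb(len, gfp_flags);
	}
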
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d500a5771db..5de0554fd7c 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -518,6 +518,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
#else
int bar = 1;
#endif
+ int phy, phy_idx = 0;
/* when built into the kernel, we only print version if device is found */
@@ -549,6 +550,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
for (i = 0; i < 3; i++)
((u16 *)dev->dev_addr)[i] =
le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->base_addr = (unsigned long)ioaddr;
dev->irq = irq;
@@ -605,33 +607,31 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
printk("%2.2x:", dev->dev_addr[i]);
printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
- if (1) {
- int phy, phy_idx = 0;
- np->phys[0] = 1; /* Default setting */
- np->mii_preamble_required++;
- for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
- int mii_status = mdio_read(dev, phy, MII_BMSR);
- if (mii_status != 0xffff && mii_status != 0x0000) {
- np->phys[phy_idx++] = phy;
- np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
- if ((mii_status & 0x0040) == 0)
- np->mii_preamble_required++;
- printk(KERN_INFO "%s: MII PHY found at address %d, status "
- "0x%4.4x advertising %4.4x.\n",
- dev->name, phy, mii_status, np->mii_if.advertising);
- }
- }
- np->mii_preamble_required--;
-
- if (phy_idx == 0) {
- printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
- dev->name, ioread32(ioaddr + ASICCtrl));
- goto err_out_unregister;
+ np->phys[0] = 1; /* Default setting */
+ np->mii_preamble_required++;
+ for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ int phyx = phy & 0x1f;
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phyx;
+ np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
+ if ((mii_status & 0x0040) == 0)
+ np->mii_preamble_required++;
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phyx, mii_status, np->mii_if.advertising);
}
+ }
+ np->mii_preamble_required--;
- np->mii_if.phy_id = np->phys[0];
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
+ dev->name, ioread32(ioaddr + ASICCtrl));
+ goto err_out_unregister;
}
+ np->mii_if.phy_id = np->phys[0];
+
/* Parse override configuration */
np->an_enable = 1;
if (card_idx < MAX_UNITS) {
@@ -692,7 +692,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
/* Reset the chip to erase previous misconfiguration. */
if (netif_msg_hw(np))
printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
- iowrite16(0x007f, ioaddr + ASICCtrl + 2);
+ iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
if (netif_msg_hw(np))
printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
@@ -1619,6 +1619,7 @@ static struct ethtool_ops ethtool_ops = {
.get_link = get_link,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
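
The reworked sundance probe loop above runs phy from 1 to 32 and masks with 0x1f, which visits PHY addresses 1 through 31 first and address 0 last. A tiny userspace sketch showing the ordering:

	#include <stdio.h>

	int main(void)
	{
		int phy;

		for (phy = 1; phy <= 32; phy++)
			printf("%d ", phy & 0x1f);	/* prints 1 2 ... 31 0 */
		printf("\n");
		return 0;
	}
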
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 32057e65808..9f491563944 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -318,7 +318,7 @@ static void ibmtr_cleanup_card(struct net_device *dev)
if (dev->base_addr) {
outb(0,dev->base_addr+ADAPTRESET);
- schedule_timeout(TR_RST_TIME); /* wait 50ms */
+ schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
outb(0,dev->base_addr+ADAPTRESETREL);
}
@@ -854,8 +854,7 @@ static int tok_init_card(struct net_device *dev)
writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
outb(0, PIOaddr + ADAPTRESET);
- current->state=TASK_UNINTERRUPTIBLE;
- schedule_timeout(TR_RST_TIME); /* wait 50ms */
+ schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
outb(0, PIOaddr + ADAPTRESETREL);
#ifdef ENABLE_PAGING
@@ -903,8 +902,8 @@ static int tok_open(struct net_device *dev)
DPRINTK("Adapter is up and running\n");
return 0;
}
- current->state=TASK_INTERRUPTIBLE;
- i=schedule_timeout(TR_RETRY_INTERVAL); /* wait 30 seconds */
+ i=schedule_timeout_interruptible(TR_RETRY_INTERVAL);
+ /* wait 30 seconds */
if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */
}
outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/
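
The ibmtr hunks above, like the olympic, tms380tr, typhoon and cosa hunks below, replace the open-coded "set task state, then schedule_timeout()" pair with the combined schedule_timeout_interruptible()/_uninterruptible() helpers. Roughly, the helpers amount to the following (a sketch of the intent, not necessarily the exact kernel implementation):

	#include <linux/sched.h>

	static inline signed long sleep_interruptible(signed long timeout)
	{
		__set_current_state(TASK_INTERRUPTIBLE);
		return schedule_timeout(timeout);
	}

	static inline signed long sleep_uninterruptible(signed long timeout)
	{
		__set_current_state(TASK_UNINTERRUPTIBLE);
		return schedule_timeout(timeout);
	}
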
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 9e7923192a4..05477d24fd4 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
while(olympic_priv->srb_queued) {
- t = schedule_timeout(60*HZ);
+ t = schedule_timeout_interruptible(60*HZ);
if(signal_pending(current)) {
printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 2e39bf1f746..c1925590a0e 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1243,8 +1243,7 @@ void tms380tr_wait(unsigned long time)
tmp = jiffies + time/(1000000/HZ);
do {
- current->state = TASK_INTERRUPTIBLE;
- tmp = schedule_timeout(tmp);
+ tmp = schedule_timeout_interruptible(tmp);
} while(time_after(tmp, jiffies));
#else
udelay(time);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index a22d00198e4..6b8eee8f7bf 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1787,10 +1787,15 @@ static void __init de21041_get_srom_info (struct de_private *de)
/* DEC now has a specification but early board makers
just put the address in the first EEPROM locations. */
/* This does memcmp(eedata, eedata+16, 8) */
+
+#ifndef CONFIG_MIPS_COBALT
+
for (i = 0; i < 8; i ++)
if (ee_data[i] != ee_data[16+i])
sa_offset = 20;
+#endif
+
/* store MAC address */
for (i = 0; i < 6; i ++)
de->dev->dev_addr[i] = ee_data[i + sa_offset];
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index ecfa6f8805c..4c76cb794bf 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -419,10 +419,9 @@ typhoon_reset(void __iomem *ioaddr, int wait_type)
TYPHOON_STATUS_WAITING_FOR_HOST)
goto out;
- if(wait_type == WaitSleep) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- } else
+ if(wait_type == WaitSleep)
+ schedule_timeout_uninterruptible(1);
+ else
udelay(TYPHOON_UDELAY);
}
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index fc7738ffbff..24187158928 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -490,6 +490,8 @@ struct rhine_private {
u8 tx_thresh, rx_thresh;
struct mii_if_info mii_if;
+ struct work_struct tx_timeout_task;
+ struct work_struct check_media_task;
void __iomem *base;
};
@@ -497,6 +499,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
+static void rhine_tx_timeout_task(struct net_device *dev);
+static void rhine_check_media_task(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
@@ -814,8 +818,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
for (i = 0; i < 6; i++)
dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (!is_valid_ether_addr(dev->perm_addr)) {
rc = -EIO;
printk(KERN_ERR "Invalid MAC address\n");
goto err_out_unmap;
@@ -850,6 +855,12 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+ INIT_WORK(&rp->tx_timeout_task,
+ (void (*)(void *))rhine_tx_timeout_task, dev);
+
+ INIT_WORK(&rp->check_media_task,
+ (void (*)(void *))rhine_check_media_task, dev);
+
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
@@ -1076,6 +1087,11 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
ioaddr + ChipCmd1);
}
+static void rhine_check_media_task(struct net_device *dev)
+{
+ rhine_check_media(dev, 0);
+}
+
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1129,8 +1145,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
if (quirks & rqRhineI) {
iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
- /* Can be called from ISR. Evil. */
- mdelay(1);
+ /* Do not call from ISR! */
+ msleep(1);
/* 0x80 must be set immediately before turning it off */
iowrite8(0x80, ioaddr + MIICmd);
@@ -1220,6 +1236,16 @@ static int rhine_open(struct net_device *dev)
static void rhine_tx_timeout(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+
+ /*
+ * Move bulk of work outside of interrupt context
+ */
+ schedule_work(&rp->tx_timeout_task);
+}
+
+static void rhine_tx_timeout_task(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1625,7 +1651,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
spin_lock(&rp->lock);
if (intr_status & IntrLinkChange)
- rhine_check_media(dev, 0);
+ schedule_work(&rp->check_media_task);
if (intr_status & IntrStatsMax) {
rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1829,6 +1855,7 @@ static struct ethtool_ops netdev_ethtool_ops = {
.set_wol = rhine_set_wol,
.get_sg = ethtool_op_get_sg,
.get_tx_csum = ethtool_op_get_tx_csum,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1872,6 +1899,9 @@ static int rhine_close(struct net_device *dev)
spin_unlock_irq(&rp->lock);
free_irq(rp->pdev->irq, dev);
+
+ flush_scheduled_work();
+
free_rbufs(dev);
free_tbufs(dev);
free_ring(dev);
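
The via-rhine change above is the most involved of this group: because rhine_disable_linkmon() now sleeps (msleep() instead of mdelay()), the transmit-timeout and link-change paths can no longer run in interrupt context, so they are pushed into work_structs that are queued from the fast paths and flushed on close. A sketch of that pattern with hypothetical names, using the three-argument INIT_WORK() of this kernel era:

	#include <linux/workqueue.h>
	#include <linux/netdevice.h>
	#include <linux/delay.h>

	struct example_priv {
		struct work_struct slow_task;
	};

	static void example_slow_task(void *data)
	{
		struct net_device *dev = data;

		/* process context: sleeping is allowed here */
		msleep(1);
		netif_wake_queue(dev);
	}

	static void example_init(struct net_device *dev, struct example_priv *ep)
	{
		INIT_WORK(&ep->slow_task, example_slow_task, dev);
	}

	static void example_from_irq(struct example_priv *ep)
	{
		/* interrupt or timer context: only queue the work */
		schedule_work(&ep->slow_task);
	}

	static void example_close(void)
	{
		/* wait for any queued work before tearing the device down */
		flush_scheduled_work();
	}
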
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7ff814fd65d..ae9e897c255 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1617,8 +1617,7 @@ static int get_wait_data(struct cosa_data *cosa)
return r;
}
/* sleep if not ready to read */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
+ schedule_timeout_interruptible(1);
}
printk(KERN_INFO "cosa: timeout in get_wait_data (status 0x%x)\n",
cosa_getstatus(cosa));
@@ -1644,8 +1643,7 @@ static int put_wait_data(struct cosa_data *cosa, int data)
}
#if 0
/* sleep if not ready to read */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
+ schedule_timeout_interruptible(1);
#endif
}
printk(KERN_INFO "cosa%d: timeout in put_wait_data (status 0x%x)\n",
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index 9e56fc346ba..e6d005726aa 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -109,7 +109,7 @@ static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
* < 0 error.
* Context: process */
-int __init cycx_drv_init(void)
+static int __init cycx_drv_init(void)
{
printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE,
copyright);
@@ -119,7 +119,7 @@ int __init cycx_drv_init(void)
/* Module 'remove' entry point.
* o release all remaining system resources */
-void cycx_drv_cleanup(void)
+static void cycx_drv_cleanup(void)
{
}
@@ -184,8 +184,7 @@ int cycx_down(struct cycx_hw *hw)
}
/* Enable interrupt generation. */
-EXPORT_SYMBOL(cycx_inten);
-void cycx_inten(struct cycx_hw *hw)
+static void cycx_inten(struct cycx_hw *hw)
{
writeb(0, hw->dpmbase);
}
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 7b48064364d..430b1f630fb 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -103,7 +103,7 @@ static struct cycx_device *cycx_card_array; /* adapter data space */
* < 0 error.
* Context: process
*/
-int __init cycx_init(void)
+static int __init cycx_init(void)
{
int cnt, err = -ENOMEM;
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 02d57c0b424..a631d1c2fa1 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -78,6 +78,7 @@
#define CYCLOMX_X25_DEBUG 1
+#include <linux/ctype.h> /* isdigit() */
#include <linux/errno.h> /* return codes */
#include <linux/if_arp.h> /* ARPHRD_HWX25 */
#include <linux/kernel.h> /* printk(), and other useful stuff */
@@ -418,7 +419,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
/* Set channel timeouts (default if not specified) */
chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
- } else if (is_digit(conf->addr[0])) { /* PVC */
+ } else if (isdigit(conf->addr[0])) { /* PVC */
s16 lcn = dec_to_uint(conf->addr, 0);
if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
@@ -1531,7 +1532,7 @@ static unsigned dec_to_uint(u8 *str, int len)
if (!len)
len = strlen(str);
- for (; len && is_digit(*str); ++str, --len)
+ for (; len && isdigit(*str); ++str, --len)
val = (val * 10) + (*str - (unsigned) '0');
return val;
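The cycx conversion (and the matching sdla_fr.c/sdla_x25.c hunks further down) drops a driver-private is_digit() macro in favour of the standard ctype helpers, so the only substantive change is the new <linux/ctype.h> include. An illustrative helper in the same spirit as the driver's dec_to_uint(), not taken from the patch:

#include <linux/ctype.h>

/* decimal string -> unsigned; isdigit() takes the character value itself */
static unsigned int example_dec_to_uint(const unsigned char *str, int len)
{
	unsigned int val = 0;

	for (; len && isdigit(*str); ++str, --len)
		val = val * 10 + (*str - '0');
	return val;
}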
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 520a77a798e..2f61a47b471 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -446,8 +446,8 @@ static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
}
-int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev,
- const char *msg)
+static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
+ struct net_device *dev, const char *msg)
{
int ret = 0;
@@ -466,8 +466,9 @@ int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev,
return ret;
}
-void dscc4_tx_print(struct net_device *dev, struct dscc4_dev_priv *dpriv,
- char *msg)
+static void dscc4_tx_print(struct net_device *dev,
+ struct dscc4_dev_priv *dpriv,
+ char *msg)
{
printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
@@ -507,7 +508,8 @@ static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
}
}
-inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
+ struct net_device *dev)
{
unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
struct RxFD *rx_fd = dpriv->rx_fd + dirty;
@@ -542,8 +544,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
msg, i);
goto done;
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(10);
+ schedule_timeout_uninterruptible(10);
rmb();
} while (++i > 0);
printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
@@ -588,8 +589,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
(dpriv->iqtx[cur] & Xpr))
break;
smp_rmb();
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(10);
+ schedule_timeout_uninterruptible(10);
} while (++i > 0);
return (i >= 0 ) ? i : -EAGAIN;
@@ -1035,8 +1035,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
/* Flush posted writes */
readl(ioaddr + GSTAR);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(10);
+ schedule_timeout_uninterruptible(10);
for (i = 0; i < 16; i++)
pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
@@ -1894,7 +1893,7 @@ try:
* It failed and locked solid. Thus the introduction of a dummy skb.
* Problem is acknowledged in errata sheet DS5. Joy :o/
*/
-struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
+static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
{
struct sk_buff *skb;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 2c83cca34b8..7981a2c7906 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -74,11 +74,11 @@ MODULE_LICENSE("GPL");
/*
* Module parameters and associated variables
*/
-int fst_txq_low = FST_LOW_WATER_MARK;
-int fst_txq_high = FST_HIGH_WATER_MARK;
-int fst_max_reads = 7;
-int fst_excluded_cards = 0;
-int fst_excluded_list[FST_MAX_CARDS];
+static int fst_txq_low = FST_LOW_WATER_MARK;
+static int fst_txq_high = FST_HIGH_WATER_MARK;
+static int fst_max_reads = 7;
+static int fst_excluded_cards = 0;
+static int fst_excluded_list[FST_MAX_CARDS];
module_param(fst_txq_low, int, 0);
module_param(fst_txq_high, int, 0);
@@ -572,13 +572,13 @@ static void do_bottom_half_rx(struct fst_card_info *card);
static void fst_process_tx_work_q(unsigned long work_q);
static void fst_process_int_work_q(unsigned long work_q);
-DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
-DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
+static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
+static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
-struct fst_card_info *fst_card_array[FST_MAX_CARDS];
-spinlock_t fst_work_q_lock;
-u64 fst_work_txq;
-u64 fst_work_intq;
+static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
+static spinlock_t fst_work_q_lock;
+static u64 fst_work_txq;
+static u64 fst_work_intq;
static void
fst_q_work_item(u64 * queue, int card_index)
@@ -980,8 +980,7 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
/* Wait for any previous command to complete */
while (mbval > NAK) {
spin_unlock_irqrestore(&card->card_lock, flags);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
+ schedule_timeout_uninterruptible(1);
spin_lock_irqsave(&card->card_lock, flags);
if (++safety > 2000) {
@@ -1498,7 +1497,7 @@ do_bottom_half_rx(struct fst_card_info *card)
* The interrupt service routine
* Dev_id is our fst_card_info pointer
*/
-irqreturn_t
+static irqreturn_t
fst_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct fst_card_info *card;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index a5d6891c9d4..e1601d35dce 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -330,7 +330,7 @@ static int pvc_close(struct net_device *dev)
-int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
pvc_device *pvc = dev_to_pvc(dev);
fr_proto_pvc_info info;
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index 9dccd9546a1..3b94352b0d0 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -8,10 +8,10 @@
/*
* Prints out len, max to 80 octets using printk, 20 per line
*/
-void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
-{
#ifdef DEBUG
#ifdef LMC_PACKET_LOG
+void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
+{
int iNewLine = 1;
char str[80], *pstr;
@@ -43,26 +43,24 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
}
sprintf(pstr, "\n");
printk(str);
+}
#endif
#endif
-}
#ifdef DEBUG
u_int32_t lmcEventLogIndex = 0;
u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
-#endif
void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3)
{
-#ifdef DEBUG
lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
lmcEventLogBuf[lmcEventLogIndex++] = arg2;
lmcEventLogBuf[lmcEventLogIndex++] = arg3;
lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
-#endif
}
+#endif /* DEBUG */
void lmc_trace(struct net_device *dev, char *msg){
#ifdef LMC_TRACE
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index f55ce76b00e..af8b55fdd9d 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -48,14 +48,6 @@
*/
/*
- * For lack of a better place, put the SSI cable stuff here.
- */
-char *lmc_t1_cables[] = {
- "V.10/RS423", "EIA530A", "reserved", "X.21", "V.35",
- "EIA449/EIA530/V.36", "V.28/EIA232", "none", NULL
-};
-
-/*
* protocol independent method.
*/
static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
index 73401b0f015..2024b26b99e 100644
--- a/drivers/net/wan/pc300.h
+++ b/drivers/net/wan/pc300.h
@@ -472,24 +472,8 @@ enum pc300_loopback_cmds {
#ifdef __KERNEL__
/* Function Prototypes */
-int dma_buf_write(pc300_t *, int, ucchar *, int);
-int dma_buf_read(pc300_t *, int, struct sk_buff *);
void tx_dma_start(pc300_t *, int);
-void rx_dma_start(pc300_t *, int);
-void tx_dma_stop(pc300_t *, int);
-void rx_dma_stop(pc300_t *, int);
-int cpc_queue_xmit(struct sk_buff *, struct net_device *);
-void cpc_net_rx(struct net_device *);
-void cpc_sca_status(pc300_t *, int);
-int cpc_change_mtu(struct net_device *, int);
-int cpc_ioctl(struct net_device *, struct ifreq *, int);
-int ch_config(pc300dev_t *);
-int rx_config(pc300dev_t *);
-int tx_config(pc300dev_t *);
-void cpc_opench(pc300dev_t *);
-void cpc_closech(pc300dev_t *);
int cpc_open(struct net_device *dev);
-int cpc_close(struct net_device *dev);
int cpc_set_media(hdlc_device *, int);
#endif /* __KERNEL__ */
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 3e7753b1071..a3e65d1bc19 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -291,6 +291,7 @@ static uclong detect_ram(pc300_t *);
static void plx_init(pc300_t *);
static void cpc_trace(struct net_device *, struct sk_buff *, char);
static int cpc_attach(struct net_device *, unsigned short, unsigned short);
+static int cpc_close(struct net_device *dev);
#ifdef CONFIG_PC300_MLPPP
void cpc_tty_init(pc300dev_t * dev);
@@ -437,7 +438,7 @@ static void rx_dma_buf_check(pc300_t * card, int ch)
printk("\n");
}
-int dma_get_rx_frame_size(pc300_t * card, int ch)
+static int dma_get_rx_frame_size(pc300_t * card, int ch)
{
volatile pcsca_bd_t __iomem *ptdescr;
ucshort first_bd = card->chan[ch].rx_first_bd;
@@ -462,7 +463,7 @@ int dma_get_rx_frame_size(pc300_t * card, int ch)
* dma_buf_write: writes a frame to the Tx DMA buffers
* NOTE: this function writes one frame at a time.
*/
-int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
+static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
{
int i, nchar;
volatile pcsca_bd_t __iomem *ptdescr;
@@ -503,7 +504,7 @@ int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
* dma_buf_read: reads a frame from the Rx DMA buffers
* NOTE: this function reads one frame at a time.
*/
-int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
+static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
{
int nchar;
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -560,7 +561,7 @@ int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
return (rcvd);
}
-void tx_dma_stop(pc300_t * card, int ch)
+static void tx_dma_stop(pc300_t * card, int ch)
{
void __iomem *scabase = card->hw.scabase;
ucchar drr_ena_bit = 1 << (5 + 2 * ch);
@@ -571,7 +572,7 @@ void tx_dma_stop(pc300_t * card, int ch)
cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
}
-void rx_dma_stop(pc300_t * card, int ch)
+static void rx_dma_stop(pc300_t * card, int ch)
{
void __iomem *scabase = card->hw.scabase;
ucchar drr_ena_bit = 1 << (4 + 2 * ch);
@@ -582,7 +583,7 @@ void rx_dma_stop(pc300_t * card, int ch)
cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
}
-void rx_dma_start(pc300_t * card, int ch)
+static void rx_dma_start(pc300_t * card, int ch)
{
void __iomem *scabase = card->hw.scabase;
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -607,7 +608,7 @@ void rx_dma_start(pc300_t * card, int ch)
/*************************/
/*** FALC Routines ***/
/*************************/
-void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
+static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
{
void __iomem *falcbase = card->hw.falcbase;
unsigned long i = 0;
@@ -622,7 +623,7 @@ void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
cpc_writeb(falcbase + F_REG(CMDR, ch), cmd);
}
-void falc_intr_enable(pc300_t * card, int ch)
+static void falc_intr_enable(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -672,7 +673,7 @@ void falc_intr_enable(pc300_t * card, int ch)
}
}
-void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
+static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
{
void __iomem *falcbase = card->hw.falcbase;
ucchar tshf = card->chan[ch].falc.offset;
@@ -688,7 +689,7 @@ void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
(0x80 >> (timeslot & 0x07)));
}
-void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
+static void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
{
void __iomem *falcbase = card->hw.falcbase;
ucchar tshf = card->chan[ch].falc.offset;
@@ -704,7 +705,7 @@ void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
~(0x80 >> (timeslot & 0x07)));
}
-void falc_close_all_timeslots(pc300_t * card, int ch)
+static void falc_close_all_timeslots(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -726,7 +727,7 @@ void falc_close_all_timeslots(pc300_t * card, int ch)
}
}
-void falc_open_all_timeslots(pc300_t * card, int ch)
+static void falc_open_all_timeslots(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -758,7 +759,7 @@ void falc_open_all_timeslots(pc300_t * card, int ch)
}
}
-void falc_init_timeslot(pc300_t * card, int ch)
+static void falc_init_timeslot(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -776,7 +777,7 @@ void falc_init_timeslot(pc300_t * card, int ch)
}
}
-void falc_enable_comm(pc300_t * card, int ch)
+static void falc_enable_comm(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
@@ -792,7 +793,7 @@ void falc_enable_comm(pc300_t * card, int ch)
~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
}
-void falc_disable_comm(pc300_t * card, int ch)
+static void falc_disable_comm(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
@@ -806,7 +807,7 @@ void falc_disable_comm(pc300_t * card, int ch)
((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
}
-void falc_init_t1(pc300_t * card, int ch)
+static void falc_init_t1(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -975,7 +976,7 @@ void falc_init_t1(pc300_t * card, int ch)
falc_close_all_timeslots(card, ch);
}
-void falc_init_e1(pc300_t * card, int ch)
+static void falc_init_e1(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1155,7 +1156,7 @@ void falc_init_e1(pc300_t * card, int ch)
falc_close_all_timeslots(card, ch);
}
-void falc_init_hdlc(pc300_t * card, int ch)
+static void falc_init_hdlc(pc300_t * card, int ch)
{
void __iomem *falcbase = card->hw.falcbase;
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -1181,7 +1182,7 @@ void falc_init_hdlc(pc300_t * card, int ch)
falc_intr_enable(card, ch);
}
-void te_config(pc300_t * card, int ch)
+static void te_config(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1241,7 +1242,7 @@ void te_config(pc300_t * card, int ch)
CPC_UNLOCK(card, flags);
}
-void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
+static void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1397,7 +1398,7 @@ void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
}
}
-void falc_update_stats(pc300_t * card, int ch)
+static void falc_update_stats(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1450,7 +1451,7 @@ void falc_update_stats(pc300_t * card, int ch)
* the synchronizer and then sent to the system interface.
*----------------------------------------------------------------------------
*/
-void falc_remote_loop(pc300_t * card, int ch, int loop_on)
+static void falc_remote_loop(pc300_t * card, int ch, int loop_on)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1495,7 +1496,7 @@ void falc_remote_loop(pc300_t * card, int ch, int loop_on)
* coding must be identical.
*----------------------------------------------------------------------------
*/
-void falc_local_loop(pc300_t * card, int ch, int loop_on)
+static void falc_local_loop(pc300_t * card, int ch, int loop_on)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1522,7 +1523,7 @@ void falc_local_loop(pc300_t * card, int ch, int loop_on)
* looped. They are originated by the FALC-LH transmitter.
*----------------------------------------------------------------------------
*/
-void falc_payload_loop(pc300_t * card, int ch, int loop_on)
+static void falc_payload_loop(pc300_t * card, int ch, int loop_on)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1576,7 +1577,7 @@ void falc_payload_loop(pc300_t * card, int ch, int loop_on)
* Description: Turns XLU bit off in the proper register
*----------------------------------------------------------------------------
*/
-void turn_off_xlu(pc300_t * card, int ch)
+static void turn_off_xlu(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1597,7 +1598,7 @@ void turn_off_xlu(pc300_t * card, int ch)
* Description: Turns XLD bit off in the proper register
*----------------------------------------------------------------------------
*/
-void turn_off_xld(pc300_t * card, int ch)
+static void turn_off_xld(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1619,7 +1620,7 @@ void turn_off_xld(pc300_t * card, int ch)
* to generate a LOOP activation code over a T1/E1 line.
*----------------------------------------------------------------------------
*/
-void falc_generate_loop_up_code(pc300_t * card, int ch)
+static void falc_generate_loop_up_code(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1652,7 +1653,7 @@ void falc_generate_loop_up_code(pc300_t * card, int ch)
* to generate a LOOP deactivation code over a T1/E1 line.
*----------------------------------------------------------------------------
*/
-void falc_generate_loop_down_code(pc300_t * card, int ch)
+static void falc_generate_loop_down_code(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1682,7 +1683,7 @@ void falc_generate_loop_down_code(pc300_t * card, int ch)
* it on the reception side.
*----------------------------------------------------------------------------
*/
-void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
+static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1729,7 +1730,7 @@ void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
* Description: This routine returns the bit error counter value
*----------------------------------------------------------------------------
*/
-ucshort falc_pattern_test_error(pc300_t * card, int ch)
+static ucshort falc_pattern_test_error(pc300_t * card, int ch)
{
pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1769,7 +1770,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
netif_rx(skb);
}
-void cpc_tx_timeout(struct net_device *dev)
+static void cpc_tx_timeout(struct net_device *dev)
{
pc300dev_t *d = (pc300dev_t *) dev->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -1797,7 +1798,7 @@ void cpc_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
pc300dev_t *d = (pc300dev_t *) dev->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -1880,7 +1881,7 @@ int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-void cpc_net_rx(struct net_device *dev)
+static void cpc_net_rx(struct net_device *dev)
{
pc300dev_t *d = (pc300dev_t *) dev->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -2403,7 +2404,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_HANDLED;
}
-void cpc_sca_status(pc300_t * card, int ch)
+static void cpc_sca_status(pc300_t * card, int ch)
{
ucchar ilar;
void __iomem *scabase = card->hw.scabase;
@@ -2495,7 +2496,7 @@ void cpc_sca_status(pc300_t * card, int ch)
}
}
-void cpc_falc_status(pc300_t * card, int ch)
+static void cpc_falc_status(pc300_t * card, int ch)
{
pc300ch_t *chan = &card->chan[ch];
falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2523,7 +2524,7 @@ void cpc_falc_status(pc300_t * card, int ch)
CPC_UNLOCK(card, flags);
}
-int cpc_change_mtu(struct net_device *dev, int new_mtu)
+static int cpc_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU))
return -EINVAL;
@@ -2531,7 +2532,7 @@ int cpc_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
pc300dev_t *d = (pc300dev_t *) dev->priv;
@@ -2856,7 +2857,7 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
}
}
-int ch_config(pc300dev_t * d)
+static int ch_config(pc300dev_t * d)
{
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -3004,7 +3005,7 @@ int ch_config(pc300dev_t * d)
return 0;
}
-int rx_config(pc300dev_t * d)
+static int rx_config(pc300dev_t * d)
{
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
@@ -3035,7 +3036,7 @@ int rx_config(pc300dev_t * d)
return 0;
}
-int tx_config(pc300dev_t * d)
+static int tx_config(pc300dev_t * d)
{
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
@@ -3098,7 +3099,7 @@ static int cpc_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-void cpc_opench(pc300dev_t * d)
+static void cpc_opench(pc300dev_t * d)
{
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
@@ -3116,7 +3117,7 @@ void cpc_opench(pc300dev_t * d)
cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR));
}
-void cpc_closech(pc300dev_t * d)
+static void cpc_closech(pc300dev_t * d)
{
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
@@ -3173,7 +3174,7 @@ int cpc_open(struct net_device *dev)
return 0;
}
-int cpc_close(struct net_device *dev)
+static int cpc_close(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
pc300dev_t *d = (pc300dev_t *) dev->priv;
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 8454bf6caaa..52f26b9c69d 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -112,10 +112,10 @@ typedef struct _st_cpc_tty_area {
static struct tty_driver serial_drv;
/* local variables */
-st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS];
+static st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS];
-int cpc_tty_cnt=0; /* number of intrfaces configured with MLPPP */
-int cpc_tty_unreg_flag = 0;
+static int cpc_tty_cnt = 0; /* number of intrfaces configured with MLPPP */
+static int cpc_tty_unreg_flag = 0;
/* TTY functions prototype */
static int cpc_tty_open(struct tty_struct *tty, struct file *flip);
@@ -132,9 +132,9 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char);
-int pc300_tiocmset(struct tty_struct *, struct file *,
- unsigned int, unsigned int);
-int pc300_tiocmget(struct tty_struct *, struct file *);
+static int pc300_tiocmset(struct tty_struct *, struct file *,
+ unsigned int, unsigned int);
+static int pc300_tiocmget(struct tty_struct *, struct file *);
/* functions called by PC300 driver */
void cpc_tty_init(pc300dev_t *dev);
@@ -538,8 +538,8 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
return(0);
}
-int pc300_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
+static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
{
st_cpc_tty_area *cpc_tty;
@@ -565,7 +565,7 @@ int pc300_tiocmset(struct tty_struct *tty, struct file *file,
return 0;
}
-int pc300_tiocmget(struct tty_struct *tty, struct file *file)
+static int pc300_tiocmget(struct tty_struct *tty, struct file *file)
{
unsigned int result;
unsigned char status;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 3ac9a45b20f..036adc4f8ba 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -182,7 +182,7 @@ static char sdla_byte(struct net_device *dev, int addr)
return(byte);
}
-void sdla_stop(struct net_device *dev)
+static void sdla_stop(struct net_device *dev)
{
struct frad_local *flp;
@@ -209,7 +209,7 @@ void sdla_stop(struct net_device *dev)
}
}
-void sdla_start(struct net_device *dev)
+static void sdla_start(struct net_device *dev)
{
struct frad_local *flp;
@@ -247,7 +247,7 @@ void sdla_start(struct net_device *dev)
*
***************************************************/
-int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
+static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
{
unsigned long start, done, now;
char resp, *temp;
@@ -505,7 +505,7 @@ static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
static int sdla_reconfig(struct net_device *dev);
-int sdla_activate(struct net_device *slave, struct net_device *master)
+static int sdla_activate(struct net_device *slave, struct net_device *master)
{
struct frad_local *flp;
int i;
@@ -527,7 +527,7 @@ int sdla_activate(struct net_device *slave, struct net_device *master)
return(0);
}
-int sdla_deactivate(struct net_device *slave, struct net_device *master)
+static int sdla_deactivate(struct net_device *slave, struct net_device *master)
{
struct frad_local *flp;
int i;
@@ -549,7 +549,7 @@ int sdla_deactivate(struct net_device *slave, struct net_device *master)
return(0);
}
-int sdla_assoc(struct net_device *slave, struct net_device *master)
+static int sdla_assoc(struct net_device *slave, struct net_device *master)
{
struct frad_local *flp;
int i;
@@ -585,7 +585,7 @@ int sdla_assoc(struct net_device *slave, struct net_device *master)
return(0);
}
-int sdla_deassoc(struct net_device *slave, struct net_device *master)
+static int sdla_deassoc(struct net_device *slave, struct net_device *master)
{
struct frad_local *flp;
int i;
@@ -613,7 +613,7 @@ int sdla_deassoc(struct net_device *slave, struct net_device *master)
return(0);
}
-int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
+static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
{
struct frad_local *flp;
struct dlci_local *dlp;
@@ -1324,7 +1324,7 @@ NOTE: This is rather a useless action right now, as the
return(0);
}
-int sdla_change_mtu(struct net_device *dev, int new_mtu)
+static int sdla_change_mtu(struct net_device *dev, int new_mtu)
{
struct frad_local *flp;
@@ -1337,7 +1337,7 @@ int sdla_change_mtu(struct net_device *dev, int new_mtu)
return(-EOPNOTSUPP);
}
-int sdla_set_config(struct net_device *dev, struct ifmap *map)
+static int sdla_set_config(struct net_device *dev, struct ifmap *map)
{
struct frad_local *flp;
int i;
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
index 0497dbdb863..7f1ce9d4333 100644
--- a/drivers/net/wan/sdla_fr.c
+++ b/drivers/net/wan/sdla_fr.c
@@ -822,7 +822,7 @@ static int new_if(struct wan_device* wandev, struct net_device* dev,
chan->card = card;
/* verify media address */
- if (is_digit(conf->addr[0])) {
+ if (isdigit(conf->addr[0])) {
dlci = dec_to_uint(conf->addr, 0);
@@ -3456,7 +3456,7 @@ static unsigned int dec_to_uint (unsigned char* str, int len)
if (!len)
len = strlen(str);
- for (val = 0; len && is_digit(*str); ++str, --len)
+ for (val = 0; len && isdigit(*str); ++str, --len)
val = (val * 10) + (*str - (unsigned)'0');
return val;
diff --git a/drivers/net/wan/sdla_x25.c b/drivers/net/wan/sdla_x25.c
index 8a95d61a2f8..63f846d6f3a 100644
--- a/drivers/net/wan/sdla_x25.c
+++ b/drivers/net/wan/sdla_x25.c
@@ -957,7 +957,7 @@ static int new_if(struct wan_device* wandev, struct net_device* dev,
chan->hold_timeout = (conf->hold_timeout) ?
conf->hold_timeout : 10;
- }else if (is_digit(conf->addr[0])){ /* PVC */
+ }else if (isdigit(conf->addr[0])){ /* PVC */
int lcn = dec_to_uint(conf->addr, 0);
if ((lcn >= card->u.x.lo_pvc) && (lcn <= card->u.x.hi_pvc)){
@@ -3875,7 +3875,7 @@ static unsigned int dec_to_uint (unsigned char* str, int len)
if (!len)
len = strlen(str);
- for (val = 0; len && is_digit(*str); ++str, --len)
+ for (val = 0; len && isdigit(*str); ++str, --len)
val = (val * 10) + (*str - (unsigned)'0');
return val;
@@ -3896,9 +3896,9 @@ static unsigned int hex_to_uint (unsigned char* str, int len)
for (val = 0; len; ++str, --len)
{
ch = *str;
- if (is_digit(ch))
+ if (isdigit(ch))
val = (val << 4) + (ch - (unsigned)'0');
- else if (is_hex_digit(ch))
+ else if (isxdigit(ch))
val = (val << 4) + ((ch & 0xDF) - (unsigned)'A' + 10);
else break;
}
diff --git a/drivers/net/wan/sdladrv.c b/drivers/net/wan/sdladrv.c
index c8bc6da57a4..7c2cf2e7630 100644
--- a/drivers/net/wan/sdladrv.c
+++ b/drivers/net/wan/sdladrv.c
@@ -642,9 +642,7 @@ int sdla_mapmem (sdlahw_t* hw, unsigned long addr)
* Enable interrupt generation.
*/
-EXPORT_SYMBOL(sdla_inten);
-
-int sdla_inten (sdlahw_t* hw)
+static int sdla_inten (sdlahw_t* hw)
{
unsigned port = hw->port;
int tmp, i;
@@ -698,8 +696,7 @@ int sdla_inten (sdlahw_t* hw)
* Disable interrupt generation.
*/
-EXPORT_SYMBOL(sdla_intde);
-
+#if 0
int sdla_intde (sdlahw_t* hw)
{
unsigned port = hw->port;
@@ -748,14 +745,13 @@ int sdla_intde (sdlahw_t* hw)
}
return 0;
}
+#endif /* 0 */
/*============================================================================
* Acknowledge SDLA hardware interrupt.
*/
-EXPORT_SYMBOL(sdla_intack);
-
-int sdla_intack (sdlahw_t* hw)
+static int sdla_intack (sdlahw_t* hw)
{
unsigned port = hw->port;
int tmp;
@@ -827,8 +823,7 @@ void read_S514_int_stat (sdlahw_t* hw, u32* int_status)
* Generate an interrupt to adapter's CPU.
*/
-EXPORT_SYMBOL(sdla_intr);
-
+#if 0
int sdla_intr (sdlahw_t* hw)
{
unsigned port = hw->port;
@@ -863,6 +858,7 @@ int sdla_intr (sdlahw_t* hw)
}
return 0;
}
+#endif /* 0 */
/*============================================================================
* Execute Adapter Command.
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index a6d3b55013a..2d1bba06a08 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -221,7 +221,7 @@ static void sppp_clear_timeout(struct sppp *p)
* here.
*/
-void sppp_input (struct net_device *dev, struct sk_buff *skb)
+static void sppp_input (struct net_device *dev, struct sk_buff *skb)
{
struct ppp_header *h;
struct sppp *sp = (struct sppp *)sppp_of(dev);
@@ -355,8 +355,6 @@ done:
return;
}
-EXPORT_SYMBOL(sppp_input);
-
/*
* Handle transmit packets.
*/
@@ -990,7 +988,7 @@ EXPORT_SYMBOL(sppp_reopen);
* the mtu is out of range.
*/
-int sppp_change_mtu(struct net_device *dev, int new_mtu)
+static int sppp_change_mtu(struct net_device *dev, int new_mtu)
{
if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
return -EINVAL;
@@ -998,8 +996,6 @@ int sppp_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-EXPORT_SYMBOL(sppp_change_mtu);
-
/**
* sppp_do_ioctl - Ioctl handler for ppp/hdlc
* @dev: Device subject to ioctl
@@ -1456,7 +1452,7 @@ static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_t
return 0;
}
-struct packet_type sppp_packet_type = {
+static struct packet_type sppp_packet_type = {
.type = __constant_htons(ETH_P_WAN_PPP),
.func = sppp_rcv,
};
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 06998c2240d..cb429e78374 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1046,7 +1046,6 @@ static WifiCtlHdr wifictlhdr8023 = {
}
};
-#ifdef WIRELESS_EXT
// Frequency list (map channels to frequencies)
static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484 };
@@ -1067,7 +1066,6 @@ typedef struct wep_key_t {
/* List of Wireless Handlers (new API) */
static const struct iw_handler_def airo_handler_def;
-#endif /* WIRELESS_EXT */
static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
@@ -1110,10 +1108,8 @@ static irqreturn_t airo_interrupt( int irq, void* dev_id, struct pt_regs
static int airo_thread(void *data);
static void timer_func( struct net_device *dev );
static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-#ifdef WIRELESS_EXT
static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
static void airo_read_wireless_stats (struct airo_info *local);
-#endif /* WIRELESS_EXT */
#ifdef CISCO_EXT
static int readrids(struct net_device *dev, aironet_ioctl *comp);
static int writerids(struct net_device *dev, aironet_ioctl *comp);
@@ -1187,12 +1183,10 @@ struct airo_info {
int fid;
} xmit, xmit11;
struct net_device *wifidev;
-#ifdef WIRELESS_EXT
struct iw_statistics wstats; // wireless stats
unsigned long scan_timestamp; /* Time started to scan */
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
-#endif /* WIRELESS_EXT */
#ifdef MICSUPPORT
/* MIC stuff */
struct crypto_tfm *tfm;
@@ -2527,7 +2521,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
unsigned long mem_start, mem_len, aux_start, aux_len;
int rc = -1;
int i;
- unsigned char *busaddroff,*vpackoff;
+ dma_addr_t busaddroff;
+ unsigned char *vpackoff;
unsigned char __iomem *pciaddroff;
mem_start = pci_resource_start(pci, 1);
@@ -2570,7 +2565,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
/*
* Setup descriptor RX, TX, CONFIG
*/
- busaddroff = (unsigned char *)ai->shared_dma;
+ busaddroff = ai->shared_dma;
pciaddroff = ai->pciaux + AUX_OFFSET;
vpackoff = ai->shared;
@@ -2579,7 +2574,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
ai->rxfids[i].pending = 0;
ai->rxfids[i].card_ram_off = pciaddroff;
ai->rxfids[i].virtual_host_addr = vpackoff;
- ai->rxfids[i].rx_desc.host_addr = (dma_addr_t) busaddroff;
+ ai->rxfids[i].rx_desc.host_addr = busaddroff;
ai->rxfids[i].rx_desc.valid = 1;
ai->rxfids[i].rx_desc.len = PKTSIZE;
ai->rxfids[i].rx_desc.rdy = 0;
@@ -2594,7 +2589,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
ai->txfids[i].card_ram_off = pciaddroff;
ai->txfids[i].virtual_host_addr = vpackoff;
ai->txfids[i].tx_desc.valid = 1;
- ai->txfids[i].tx_desc.host_addr = (dma_addr_t) busaddroff;
+ ai->txfids[i].tx_desc.host_addr = busaddroff;
memcpy(ai->txfids[i].virtual_host_addr,
&wifictlhdr8023, sizeof(wifictlhdr8023));
@@ -2607,8 +2602,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
/* Rid descriptor setup */
ai->config_desc.card_ram_off = pciaddroff;
ai->config_desc.virtual_host_addr = vpackoff;
- ai->config_desc.rid_desc.host_addr = (dma_addr_t) busaddroff;
- ai->ridbus = (dma_addr_t)busaddroff;
+ ai->config_desc.rid_desc.host_addr = busaddroff;
+ ai->ridbus = busaddroff;
ai->config_desc.rid_desc.rid = 0;
ai->config_desc.rid_desc.len = RIDSIZE;
ai->config_desc.rid_desc.valid = 1;
@@ -2647,9 +2642,7 @@ static void wifi_setup(struct net_device *dev)
dev->get_stats = &airo_get_stats;
dev->set_mac_address = &airo_set_mac_address;
dev->do_ioctl = &airo_ioctl;
-#ifdef WIRELESS_EXT
dev->wireless_handlers = &airo_handler_def;
-#endif /* WIRELESS_EXT */
dev->change_mtu = &airo_change_mtu;
dev->open = &airo_open;
dev->stop = &airo_close;
@@ -2675,9 +2668,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
dev->priv = ethdev->priv;
dev->irq = ethdev->irq;
dev->base_addr = ethdev->base_addr;
-#ifdef WIRELESS_EXT
dev->wireless_data = ethdev->wireless_data;
-#endif /* WIRELESS_EXT */
memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
err = register_netdev(dev);
if (err<0) {
@@ -2755,11 +2746,9 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
dev->set_multicast_list = &airo_set_multicast_list;
dev->set_mac_address = &airo_set_mac_address;
dev->do_ioctl = &airo_ioctl;
-#ifdef WIRELESS_EXT
dev->wireless_handlers = &airo_handler_def;
ai->wireless_data.spy_data = &ai->spy_data;
dev->wireless_data = &ai->wireless_data;
-#endif /* WIRELESS_EXT */
dev->change_mtu = &airo_change_mtu;
dev->open = &airo_open;
dev->stop = &airo_close;
@@ -5515,12 +5504,13 @@ static int airo_pci_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct airo_info *ai = dev->priv;
Resp rsp;
+ pci_power_t prev_state = pdev->current_state;
- pci_set_power_state(pdev, 0);
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, ai->power), 0);
+ pci_enable_wake(pdev, PCI_D0, 0);
- if (ai->power.event > 1) {
+ if (prev_state != PCI_D1) {
reset_card(dev, 0);
mpi_init_descriptors(ai);
setup_card(ai, dev->dev_addr, 0);
@@ -5598,7 +5588,6 @@ static void __exit airo_cleanup_module( void )
remove_proc_entry("aironet", proc_root_driver);
}
-#ifdef WIRELESS_EXT
/*
* Initial Wireless Extension code for Aironet driver by :
* Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
@@ -7107,8 +7096,6 @@ static const struct iw_handler_def airo_handler_def =
.get_wireless_stats = airo_get_wireless_stats,
};
-#endif /* WIRELESS_EXT */
-
/*
* This defines the configuration part of the Wireless Extensions
* Note : irq and spinlock protection will occur in the subroutines
@@ -7187,7 +7174,6 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
-#ifdef WIRELESS_EXT
/*
* Get the Wireless stats out of the driver
* Note : irq and spinlock protection will occur in the subroutines
@@ -7260,7 +7246,6 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
return &local->wstats;
}
-#endif /* WIRELESS_EXT */
#ifdef CISCO_EXT
/*
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 9d496703c46..7b321f7cf35 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -15,28 +15,11 @@
#define PFX DRIVER_NAME ": "
#include <linux/config.h>
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/current.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
+#include <linux/delay.h>
#include <asm/pmac_feature.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
#include "orinoco.h"
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 587869d86ee..d57011028b7 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -618,12 +618,12 @@ static int atmel_lock_mac(struct atmel_private *priv);
static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
static void atmel_command_irq(struct atmel_private *priv);
static int atmel_validate_channel(struct atmel_private *priv, int channel);
-static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
+static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
u16 frame_len, u8 rssi);
static void atmel_management_timer(u_long a);
static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size);
static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size);
-static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
+static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
u8 *body, int body_len);
static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
@@ -827,7 +827,7 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
static int start_tx (struct sk_buff *skb, struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
- struct ieee80211_hdr header;
+ struct ieee80211_hdr_4addr header;
unsigned long flags;
u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
@@ -902,7 +902,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
}
static void atmel_transmit_management_frame(struct atmel_private *priv,
- struct ieee80211_hdr *header,
+ struct ieee80211_hdr_4addr *header,
u8 *body, int body_len)
{
u16 buff;
@@ -917,7 +917,7 @@ static void atmel_transmit_management_frame(struct atmel_private *priv,
tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
}
-static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header,
+static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
u16 msdu_size, u16 rx_packet_loc, u32 crc)
{
/* fast path: unfragmented packet copy directly into skbuf */
@@ -990,7 +990,7 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
return (crc ^ 0xffffffff) == netcrc;
}
-static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header,
+static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags)
{
u8 mac4[6];
@@ -1082,7 +1082,7 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *heade
static void rx_done_irq(struct atmel_private *priv)
{
int i;
- struct ieee80211_hdr header;
+ struct ieee80211_hdr_4addr header;
for (i = 0;
atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
@@ -2650,7 +2650,7 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 c
static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len)
{
- struct ieee80211_hdr header;
+ struct ieee80211_hdr_4addr header;
struct auth_body auth;
header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
@@ -2688,7 +2688,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
{
u8 *ssid_el_p;
int bodysize;
- struct ieee80211_hdr header;
+ struct ieee80211_hdr_4addr header;
struct ass_req_format {
u16 capability;
u16 listen_interval;
@@ -2738,7 +2738,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
}
-static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr *header)
+static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr_4addr *header)
{
if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
@@ -2788,7 +2788,7 @@ static int retrieve_bss(struct atmel_private *priv)
}
-static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr *header,
+static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
u16 capability, u16 beacon_period, u8 channel, u8 rssi,
u8 ssid_len, u8 *ssid, int is_beacon)
{
@@ -3072,7 +3072,7 @@ static void atmel_smooth_qual(struct atmel_private *priv)
}
/* deals with incoming management frames. */
-static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
+static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
u16 frame_len, u8 rssi)
{
u16 subtype;
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index 21c3d0d227e..eba0d9d2b7c 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -39,17 +39,10 @@
*/
#include <linux/config.h>
-
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/net.h>
-#include <asm/errno.h>
+#include <linux/init.h>
+#include <linux/delay.h>
#include "hermes.h"
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index 8c9e874c911..ad28e329436 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -30,9 +30,8 @@
* access to the hermes_t structure, and to the hardware
*/
-#include <linux/delay.h>
#include <linux/if_ether.h>
-#include <asm/byteorder.h>
+#include <asm/io.h>
/*
* Limits and constants
@@ -192,13 +191,13 @@
#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */
struct hermes_tx_descriptor {
- u16 status;
- u16 reserved1;
- u16 reserved2;
- u32 sw_support;
+ __le16 status;
+ __le16 reserved1;
+ __le16 reserved2;
+ __le32 sw_support;
u8 retry_count;
u8 tx_rate;
- u16 tx_control;
+ __le16 tx_control;
} __attribute__ ((packed));
#define HERMES_TXSTAT_RETRYERR (0x0001)
@@ -222,60 +221,60 @@ struct hermes_tx_descriptor {
#define HERMES_INQ_SEC_STAT_AGERE (0xF202)
struct hermes_tallies_frame {
- u16 TxUnicastFrames;
- u16 TxMulticastFrames;
- u16 TxFragments;
- u16 TxUnicastOctets;
- u16 TxMulticastOctets;
- u16 TxDeferredTransmissions;
- u16 TxSingleRetryFrames;
- u16 TxMultipleRetryFrames;
- u16 TxRetryLimitExceeded;
- u16 TxDiscards;
- u16 RxUnicastFrames;
- u16 RxMulticastFrames;
- u16 RxFragments;
- u16 RxUnicastOctets;
- u16 RxMulticastOctets;
- u16 RxFCSErrors;
- u16 RxDiscards_NoBuffer;
- u16 TxDiscardsWrongSA;
- u16 RxWEPUndecryptable;
- u16 RxMsgInMsgFragments;
- u16 RxMsgInBadMsgFragments;
+ __le16 TxUnicastFrames;
+ __le16 TxMulticastFrames;
+ __le16 TxFragments;
+ __le16 TxUnicastOctets;
+ __le16 TxMulticastOctets;
+ __le16 TxDeferredTransmissions;
+ __le16 TxSingleRetryFrames;
+ __le16 TxMultipleRetryFrames;
+ __le16 TxRetryLimitExceeded;
+ __le16 TxDiscards;
+ __le16 RxUnicastFrames;
+ __le16 RxMulticastFrames;
+ __le16 RxFragments;
+ __le16 RxUnicastOctets;
+ __le16 RxMulticastOctets;
+ __le16 RxFCSErrors;
+ __le16 RxDiscards_NoBuffer;
+ __le16 TxDiscardsWrongSA;
+ __le16 RxWEPUndecryptable;
+ __le16 RxMsgInMsgFragments;
+ __le16 RxMsgInBadMsgFragments;
/* Those last are probably not available in very old firmwares */
- u16 RxDiscards_WEPICVError;
- u16 RxDiscards_WEPExcluded;
+ __le16 RxDiscards_WEPICVError;
+ __le16 RxDiscards_WEPExcluded;
} __attribute__ ((packed));
/* Grabbed from wlan-ng - Thanks Mark... - Jean II
* This is the result of a scan inquiry command */
/* Structure describing info about an Access Point */
struct prism2_scan_apinfo {
- u16 channel; /* Channel where the AP sits */
- u16 noise; /* Noise level */
- u16 level; /* Signal level */
+ __le16 channel; /* Channel where the AP sits */
+ __le16 noise; /* Noise level */
+ __le16 level; /* Signal level */
u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
- u16 beacon_interv; /* Beacon interval */
- u16 capabilities; /* Capabilities */
- u16 essid_len; /* ESSID length */
+ __le16 beacon_interv; /* Beacon interval */
+ __le16 capabilities; /* Capabilities */
+ __le16 essid_len; /* ESSID length */
u8 essid[32]; /* ESSID of the network */
u8 rates[10]; /* Bit rate supported */
- u16 proberesp_rate; /* Data rate of the response frame */
- u16 atim; /* ATIM window time, Kus (hostscan only) */
+ __le16 proberesp_rate; /* Data rate of the response frame */
+ __le16 atim; /* ATIM window time, Kus (hostscan only) */
} __attribute__ ((packed));
/* Same stuff for the Lucent/Agere card.
* Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
struct agere_scan_apinfo {
- u16 channel; /* Channel where the AP sits */
- u16 noise; /* Noise level */
- u16 level; /* Signal level */
+ __le16 channel; /* Channel where the AP sits */
+ __le16 noise; /* Noise level */
+ __le16 level; /* Signal level */
u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
- u16 beacon_interv; /* Beacon interval */
- u16 capabilities; /* Capabilities */
+ __le16 beacon_interv; /* Beacon interval */
+ __le16 capabilities; /* Capabilities */
/* bits: 0-ess, 1-ibss, 4-privacy [wep] */
- u16 essid_len; /* ESSID length */
+ __le16 essid_len; /* ESSID length */
u8 essid[32]; /* ESSID of the network */
} __attribute__ ((packed));
@@ -283,16 +282,16 @@ struct agere_scan_apinfo {
struct symbol_scan_apinfo {
u8 channel; /* Channel where the AP sits */
u8 unknown1; /* 8 in 2.9x and 3.9x f/w, 0 otherwise */
- u16 noise; /* Noise level */
- u16 level; /* Signal level */
+ __le16 noise; /* Noise level */
+ __le16 level; /* Signal level */
u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
- u16 beacon_interv; /* Beacon interval */
- u16 capabilities; /* Capabilities */
+ __le16 beacon_interv; /* Beacon interval */
+ __le16 capabilities; /* Capabilities */
/* bits: 0-ess, 1-ibss, 4-privacy [wep] */
- u16 essid_len; /* ESSID length */
+ __le16 essid_len; /* ESSID length */
u8 essid[32]; /* ESSID of the network */
- u16 rates[5]; /* Bit rate supported */
- u16 basic_rates; /* Basic rates bitmask */
+ __le16 rates[5]; /* Bit rate supported */
+ __le16 basic_rates; /* Basic rates bitmask */
u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */
u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */
} __attribute__ ((packed));
@@ -312,7 +311,7 @@ union hermes_scan_info {
#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006)
struct hermes_linkstatus {
- u16 linkstatus; /* Link status */
+ __le16 linkstatus; /* Link status */
} __attribute__ ((packed));
struct hermes_response {
@@ -321,8 +320,8 @@ struct hermes_response {
/* "ID" structure - used for ESSID and station nickname */
struct hermes_idstring {
- u16 len;
- u16 val[16];
+ __le16 len;
+ __le16 val[16];
} __attribute__ ((packed));
struct hermes_multicast {
@@ -447,7 +446,7 @@ static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count
static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
{
- u16 rec;
+ __le16 rec;
int err;
err = HERMES_READ_RECORD(hw, bap, rid, &rec);
@@ -457,7 +456,7 @@ static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
{
- u16 rec = cpu_to_le16(word);
+ __le16 rec = cpu_to_le16(word);
return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
}
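The hermes.h hunks above annotate every on-the-wire little-endian field as __le16/__le32 instead of plain u16/u32. This changes no generated code; it lets sparse ("make C=1") flag any access that forgets the byte swap, which is why the wordrec accessors now hold a __le16 temporary. A minimal sketch of the intended usage, with an illustrative struct and helpers that are not from the patch:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_rec {
	__le16 len;			/* little-endian as stored by firmware */
	__le16 val[16];
} __attribute__ ((packed));

static inline u16 example_rec_len(const struct example_rec *rec)
{
	return le16_to_cpu(rec->len);	/* convert before using */
}

static inline void example_rec_set_len(struct example_rec *rec, u16 len)
{
	rec->len = cpu_to_le16(len);	/* convert back when storing */
}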
diff --git a/drivers/net/wireless/hostap/hostap.c b/drivers/net/wireless/hostap/hostap.c
index e7f5821b494..6a96cd9f268 100644
--- a/drivers/net/wireless/hostap/hostap.c
+++ b/drivers/net/wireless/hostap/hostap.c
@@ -716,9 +716,6 @@ static int prism2_close(struct net_device *dev)
hostap_deauth_all_stas(dev, local->ap, 1);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
- if (local->func->dev_close && local->func->dev_close(local))
- return 0;
-
if (dev == local->dev) {
local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL);
}
@@ -766,9 +763,6 @@ static int prism2_open(struct net_device *dev)
local->hw_downloading)
return -ENODEV;
- if (local->func->dev_open && local->func->dev_open(local))
- return 1;
-
if (!try_module_get(local->hw_module))
return -ENODEV;
local->num_dev_open++;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index b0501243b17..ffac5089945 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -6,10 +6,10 @@
void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u16 fc;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d "
"jiffies=%ld\n",
@@ -51,7 +51,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
int hdrlen, phdrlen, head_need, tail_need;
u16 fc;
int prism_header, ret;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
iface = netdev_priv(dev);
local = iface->local;
@@ -70,7 +70,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
phdrlen = 0;
}
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) {
@@ -215,7 +215,7 @@ prism2_frag_cache_find(local_info_t *local, unsigned int seq,
/* Called only as a tasklet (software IRQ) */
static struct sk_buff *
-prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
+prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr_4addr *hdr)
{
struct sk_buff *skb = NULL;
u16 sc;
@@ -229,7 +229,7 @@ prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
if (frag == 0) {
/* Reserve enough space to fit maximum frame length */
skb = dev_alloc_skb(local->dev->mtu +
- sizeof(struct ieee80211_hdr) +
+ sizeof(struct ieee80211_hdr_4addr) +
8 /* LLC */ +
2 /* alignment */ +
8 /* WEP */ + ETH_ALEN /* WDS */);
@@ -267,7 +267,7 @@ prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
/* Called only as a tasklet (software IRQ) */
static int prism2_frag_cache_invalidate(local_info_t *local,
- struct ieee80211_hdr *hdr)
+ struct ieee80211_hdr_4addr *hdr)
{
u16 sc;
unsigned int seq;
@@ -441,7 +441,7 @@ hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
u16 stype)
{
if (local->iw_mode == IW_MODE_MASTER) {
- hostap_update_sta_ps(local, (struct ieee80211_hdr *)
+ hostap_update_sta_ps(local, (struct ieee80211_hdr_4addr *)
skb->data);
}
@@ -520,7 +520,7 @@ static inline struct net_device *prism2_rx_get_wds(local_info_t *local,
static inline int
-hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr,
+hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
u16 fc, struct net_device **wds)
{
/* FIX: is this really supposed to accept WDS frames only in Master
@@ -579,13 +579,13 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
{
struct net_device *dev = local->dev;
u16 fc, ethertype;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u8 *pos;
if (skb->len < 24)
return 0;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
/* check that the frame is unicast frame to us */
@@ -619,13 +619,13 @@ static inline int
hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
struct ieee80211_crypt_data *crypt)
{
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
int res, hdrlen;
if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
return 0;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
if (local->tkip_countermeasures &&
@@ -658,13 +658,13 @@ static inline int
hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
int keyidx, struct ieee80211_crypt_data *crypt)
{
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
int res, hdrlen;
if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
return 0;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
@@ -689,7 +689,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
{
struct hostap_interface *iface;
local_info_t *local;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
size_t hdrlen;
u16 fc, type, stype, sc;
struct net_device *wds = NULL;
@@ -716,7 +716,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
dev = local->ddev;
iface = netdev_priv(dev);
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
stats = hostap_get_stats(dev);
if (skb->len < 10)
@@ -737,7 +737,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
struct iw_quality wstats;
wstats.level = rx_stats->signal;
wstats.noise = rx_stats->noise;
- wstats.updated = 6; /* No qual value */
+ wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED
+ | IW_QUAL_QUAL_INVALID | IW_QUAL_DBM;
/* Update spy records */
wireless_spy_update(dev, hdr->addr2, &wstats);
}
@@ -889,7 +890,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
(keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
goto rx_dropped;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
/* skb: hdr + (possibly fragmented) plaintext payload */
@@ -941,7 +942,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
/* this was the last fragment and the frame will be
* delivered, so remove skb from fragment cache */
skb = frag_skb;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
prism2_frag_cache_invalidate(local, hdr);
}
@@ -952,7 +953,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt))
goto rx_dropped;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) {
if (local->ieee_802_1x &&
hostap_is_eapol_frame(local, skb)) {
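[Editor's note, illustrative only] The hunks above replace the hard-coded wstats.updated = 6 with named IW_QUAL_* flags from the Wireless Extensions API. The sketch below decodes those magic numbers; the numeric values follow the customary <linux/wireless.h> bit assignments and are an assumption added for illustration, not taken from this patch.

    /* Illustrative decode of the iw_quality.updated bit-field; values are
     * assumed to match the usual <linux/wireless.h> assignments. */
    #define IW_QUAL_QUAL_UPDATED    0x01
    #define IW_QUAL_LEVEL_UPDATED   0x02
    #define IW_QUAL_NOISE_UPDATED   0x04
    #define IW_QUAL_ALL_UPDATED     0x07
    #define IW_QUAL_DBM             0x08   /* level/noise reported in dBm */
    #define IW_QUAL_QUAL_INVALID    0x10

    static unsigned char spy_updated_flags(void)
    {
            /* old code: wstats.updated = 6;  i.e. LEVEL_UPDATED | NOISE_UPDATED */
            /* old code: last_rx_updated = 7; i.e. ALL_UPDATED                   */
            return IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED |
                   IW_QUAL_QUAL_INVALID | IW_QUAL_DBM;
    }

Spelling the flags out keeps the same bit pattern while also marking the qual value as invalid and the level/noise units as dBm.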
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 6358015f652..9d24f8a38ac 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,9 +1,9 @@
void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u16 fc;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n",
name, skb->len, jiffies);
@@ -41,7 +41,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct hostap_interface *iface;
local_info_t *local;
int need_headroom, need_tailroom = 0;
- struct ieee80211_hdr hdr;
+ struct ieee80211_hdr_4addr hdr;
u16 fc, ethertype = 0;
enum {
WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
@@ -244,7 +244,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct hostap_interface *iface;
local_info_t *local;
struct hostap_skb_tx_data *meta;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u16 fc;
iface = netdev_priv(dev);
@@ -266,7 +266,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
meta->iface = iface;
if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_DATA) {
@@ -289,7 +289,7 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
{
struct hostap_interface *iface;
local_info_t *local;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u16 fc;
int hdr_len, res;
@@ -303,7 +303,7 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
if (local->tkip_countermeasures &&
crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
"TX packet to " MACSTR "\n",
@@ -317,15 +317,15 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
if (skb == NULL)
return NULL;
- if ((skb_headroom(skb) < crypt->ops->extra_prefix_len ||
- skb_tailroom(skb) < crypt->ops->extra_postfix_len) &&
- pskb_expand_head(skb, crypt->ops->extra_prefix_len,
- crypt->ops->extra_postfix_len, GFP_ATOMIC)) {
+ if ((skb_headroom(skb) < crypt->ops->extra_mpdu_prefix_len ||
+ skb_tailroom(skb) < crypt->ops->extra_mpdu_postfix_len) &&
+ pskb_expand_head(skb, crypt->ops->extra_mpdu_prefix_len,
+ crypt->ops->extra_mpdu_postfix_len, GFP_ATOMIC)) {
kfree_skb(skb);
return NULL;
}
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
hdr_len = hostap_80211_get_hdrlen(fc);
@@ -360,7 +360,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
ap_tx_ret tx_ret;
struct hostap_skb_tx_data *meta;
int no_encrypt = 0;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
iface = netdev_priv(dev);
local = iface->local;
@@ -403,7 +403,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ret = hostap_handle_sta_tx(local, &tx);
skb = tx.skb;
meta = (struct hostap_skb_tx_data *) skb->cb;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
switch (tx_ret) {
case AP_TX_CONTINUE:
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 930cef8367f..9da94ab7f05 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -591,14 +591,14 @@ static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
{
struct ap_data *ap = data;
u16 fc;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
if (!ap->local->hostapd || !ap->local->apdev) {
dev_kfree_skb(skb);
return;
}
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
/* Pass the TX callback frame to the hostapd; use 802.11 header version
@@ -623,7 +623,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
{
struct ap_data *ap = data;
struct net_device *dev = ap->local->dev;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u16 fc, *pos, auth_alg, auth_transaction, status;
struct sta_info *sta = NULL;
char *txt = NULL;
@@ -633,7 +633,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
return;
}
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_AUTH ||
@@ -692,7 +692,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
{
struct ap_data *ap = data;
struct net_device *dev = ap->local->dev;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u16 fc, *pos, status;
struct sta_info *sta = NULL;
char *txt = NULL;
@@ -702,7 +702,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
return;
}
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
(WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_ASSOC_RESP &&
@@ -757,12 +757,12 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
{
struct ap_data *ap = data;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
struct sta_info *sta;
if (skb->len < 24)
goto fail;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
if (ok) {
spin_lock(&ap->sta_table_lock);
sta = ap_get_sta(ap, hdr->addr1);
@@ -918,7 +918,7 @@ static void prism2_send_mgmt(struct net_device *dev,
{
struct hostap_interface *iface;
local_info_t *local;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u16 fc;
struct sk_buff *skb;
struct hostap_skb_tx_data *meta;
@@ -944,7 +944,7 @@ static void prism2_send_mgmt(struct net_device *dev,
fc = type_subtype;
hdrlen = hostap_80211_get_hdrlen(fc);
- hdr = (struct ieee80211_hdr *) skb_put(skb, hdrlen);
+ hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, hdrlen);
if (body)
memcpy(skb_put(skb, body_len), body, body_len);
@@ -1256,14 +1256,14 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
}
skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN +
- ap->crypt->extra_prefix_len +
- ap->crypt->extra_postfix_len);
+ ap->crypt->extra_mpdu_prefix_len +
+ ap->crypt->extra_mpdu_postfix_len);
if (skb == NULL) {
kfree(tmpbuf);
return NULL;
}
- skb_reserve(skb, ap->crypt->extra_prefix_len);
+ skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len);
memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0,
WLAN_AUTH_CHALLENGE_LEN);
if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
@@ -1272,7 +1272,7 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
return NULL;
}
- memcpy(tmpbuf, skb->data + ap->crypt->extra_prefix_len,
+ memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len,
WLAN_AUTH_CHALLENGE_LEN);
dev_kfree_skb(skb);
@@ -1285,7 +1285,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct net_device *dev = local->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
size_t hdrlen;
struct ap_data *ap = local->ap;
char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL;
@@ -1498,7 +1498,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats, int reassoc)
{
struct net_device *dev = local->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
char body[12], *p, *lpos;
int len, left;
u16 *pos;
@@ -1705,7 +1705,7 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct net_device *dev = local->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN);
int len;
u16 reason_code, *pos;
@@ -1746,7 +1746,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct net_device *dev = local->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
int len;
u16 reason_code, *pos;
@@ -1784,7 +1784,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
/* Called only as a scheduled task for pending AP frames. */
static void ap_handle_data_nullfunc(local_info_t *local,
- struct ieee80211_hdr *hdr)
+ struct ieee80211_hdr_4addr *hdr)
{
struct net_device *dev = local->dev;
@@ -1801,7 +1801,7 @@ static void ap_handle_data_nullfunc(local_info_t *local,
/* Called only as a scheduled task for pending AP frames. */
static void ap_handle_dropped_data(local_info_t *local,
- struct ieee80211_hdr *hdr)
+ struct ieee80211_hdr_4addr *hdr)
{
struct net_device *dev = local->dev;
struct sta_info *sta;
@@ -1860,7 +1860,7 @@ static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta,
/* Called only as a scheduled task for pending AP frames. */
static void handle_pspoll(local_info_t *local,
- struct ieee80211_hdr *hdr,
+ struct ieee80211_hdr_4addr *hdr,
struct hostap_80211_rx_status *rx_stats)
{
struct net_device *dev = local->dev;
@@ -1979,7 +1979,7 @@ static void handle_wds_oper_queue(void *data)
static void handle_beacon(local_info_t *local, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
int len, left;
u16 *pos, beacon_int, capability;
@@ -2137,11 +2137,11 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
struct net_device *dev = local->dev;
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
u16 fc, type, stype;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
/* FIX: should give skb->len to handler functions and check that the
* buffer is long enough */
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
@@ -2258,7 +2258,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
struct hostap_interface *iface;
local_info_t *local;
u16 fc;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
iface = netdev_priv(dev);
local = iface->local;
@@ -2268,7 +2268,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
local->stats.rx_packets++;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL &&
@@ -2289,7 +2289,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
{
struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
struct hostap_80211_rx_status rx_stats;
if (skb_queue_empty(&sta->tx_buf))
@@ -2302,7 +2302,7 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
return;
}
- hdr = (struct ieee80211_hdr *) skb_put(skb, 16);
+ hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, 16);
/* Generate a fake pspoll frame to start packet delivery */
hdr->frame_ctl = __constant_cpu_to_le16(
@@ -2349,7 +2349,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
qual[count].updated = sta->last_rx_updated;
- sta->last_rx_updated = 0;
+ sta->last_rx_updated = IW_QUAL_DBM;
count++;
if (count >= buf_size)
@@ -2467,7 +2467,7 @@ static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
- sta->last_rx_updated = 0;
+ sta->last_rx_updated = IW_QUAL_DBM;
/* To be continued, we should make good use of IWEVCUSTOM */
}
@@ -2685,7 +2685,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
struct sta_info *sta = NULL;
struct sk_buff *skb = tx->skb;
int set_tim, ret;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
struct hostap_skb_tx_data *meta;
meta = (struct hostap_skb_tx_data *) skb->cb;
@@ -2694,7 +2694,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
meta->iface->type == HOSTAP_INTERFACE_STA)
goto out;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
if (hdr->addr1[0] & 0x01) {
/* broadcast/multicast frame - no AP related processing */
@@ -2821,10 +2821,10 @@ void hostap_handle_sta_release(void *ptr)
void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
{
struct sta_info *sta;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
struct hostap_skb_tx_data *meta;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
meta = (struct hostap_skb_tx_data *) skb->cb;
spin_lock(&local->ap->sta_table_lock);
@@ -2892,7 +2892,7 @@ static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta,
/* Called only as a tasklet (software IRQ). Called for each RX frame to update
* STA power saving state. pwrmgt is a flag from 802.11 frame_ctl field. */
-int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr)
+int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr_4addr *hdr)
{
struct sta_info *sta;
u16 fc;
@@ -2925,12 +2925,12 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
int ret;
struct sta_info *sta;
u16 fc, type, stype;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
if (local->ap == NULL)
return AP_RX_CONTINUE;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
@@ -3058,7 +3058,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
/* Called only as a tasklet (software IRQ) */
int hostap_handle_sta_crypto(local_info_t *local,
- struct ieee80211_hdr *hdr,
+ struct ieee80211_hdr_4addr *hdr,
struct ieee80211_crypt_data **crypt,
void **sta_ptr)
{
@@ -3160,7 +3160,7 @@ int hostap_add_sta(struct ap_data *ap, u8 *sta_addr)
/* Called only as a tasklet (software IRQ) */
int hostap_update_rx_stats(struct ap_data *ap,
- struct ieee80211_hdr *hdr,
+ struct ieee80211_hdr_4addr *hdr,
struct hostap_80211_rx_status *rx_stats)
{
struct sta_info *sta;
@@ -3174,7 +3174,7 @@ int hostap_update_rx_stats(struct ap_data *ap,
sta->last_rx_silence = rx_stats->noise;
sta->last_rx_signal = rx_stats->signal;
sta->last_rx_rate = rx_stats->rate;
- sta->last_rx_updated = 7;
+ sta->last_rx_updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
if (rx_stats->rate == 10)
sta->rx_count[0]++;
else if (rx_stats->rate == 20)
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
index 816a52bcea8..6d00df69c2e 100644
--- a/drivers/net/wireless/hostap/hostap_ap.h
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -233,7 +233,7 @@ struct hostap_tx_data {
ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx);
void hostap_handle_sta_release(void *ptr);
void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb);
-int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr);
+int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr_4addr *hdr);
typedef enum {
AP_RX_CONTINUE, AP_RX_DROP, AP_RX_EXIT, AP_RX_CONTINUE_NOT_AUTHORIZED
} ap_rx_ret;
@@ -241,13 +241,13 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats,
int wds);
-int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr *hdr,
+int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
struct ieee80211_crypt_data **crypt,
void **sta_ptr);
int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr);
int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr);
int hostap_add_sta(struct ap_data *ap, u8 *sta_addr);
-int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr,
+int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr_4addr *hdr,
struct hostap_80211_rx_status *rx_stats);
void hostap_update_rates(local_info_t *local);
void hostap_add_wds_links(local_info_t *local);
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index faa83badf0a..2643976a667 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -492,42 +492,10 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
}
-static int prism2_pccard_dev_open(local_info_t *local)
-{
- struct hostap_cs_priv *hw_priv = local->hw_priv;
- hw_priv->link->open++;
- return 0;
-}
-
-
-static int prism2_pccard_dev_close(local_info_t *local)
-{
- struct hostap_cs_priv *hw_priv;
-
- if (local == NULL || local->hw_priv == NULL)
- return 1;
- hw_priv = local->hw_priv;
- if (hw_priv->link == NULL)
- return 1;
-
- if (!hw_priv->link->open) {
- printk(KERN_WARNING "%s: prism2_pccard_dev_close(): "
- "link not open?!\n", local->dev->name);
- return 1;
- }
-
- hw_priv->link->open--;
-
- return 0;
-}
-
-
static struct prism2_helper_functions prism2_pccard_funcs =
{
.card_present = prism2_pccard_card_present,
.cor_sreset = prism2_pccard_cor_sreset,
- .dev_open = prism2_pccard_dev_open,
- .dev_close = prism2_pccard_dev_close,
.genesis_reset = prism2_pccard_genesis_reset,
.hw_type = HOSTAP_HW_PCCARD,
};
@@ -597,13 +565,14 @@ static void prism2_detach(dev_link_t *link)
*linkp = link->next;
/* release net devices */
if (link->priv) {
+ struct hostap_cs_priv *hw_priv;
struct net_device *dev;
struct hostap_interface *iface;
dev = link->priv;
iface = netdev_priv(dev);
- kfree(iface->local->hw_priv);
- iface->local->hw_priv = NULL;
+ hw_priv = iface->local->hw_priv;
prism2_free_local_data(dev);
+ kfree(hw_priv);
}
kfree(link);
}
@@ -883,6 +852,13 @@ static int prism2_event(event_t event, int priority,
{
dev_link_t *link = args->client_data;
struct net_device *dev = (struct net_device *) link->priv;
+ int dev_open = 0;
+
+ if (link->state & DEV_CONFIG) {
+ struct hostap_interface *iface = netdev_priv(dev);
+ if (iface && iface->local)
+ dev_open = iface->local->num_dev_open > 0;
+ }
switch (event) {
case CS_EVENT_CARD_INSERTION:
@@ -911,7 +887,7 @@ static int prism2_event(event_t event, int priority,
case CS_EVENT_RESET_PHYSICAL:
PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_RESET_PHYSICAL\n", dev_info);
if (link->state & DEV_CONFIG) {
- if (link->open) {
+ if (dev_open) {
netif_stop_queue(dev);
netif_device_detach(dev);
}
@@ -931,8 +907,8 @@ static int prism2_event(event_t event, int priority,
pcmcia_request_configuration(link->handle,
&link->conf);
prism2_hw_shutdown(dev, 1);
- prism2_hw_config(dev, link->open ? 0 : 1);
- if (link->open) {
+ prism2_hw_config(dev, dev_open ? 0 : 1);
+ if (dev_open) {
netif_device_attach(dev);
netif_start_queue(dev);
}
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index e533a663ded..59fc1557239 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3322,6 +3322,18 @@ static void prism2_free_local_data(struct net_device *dev)
iface = netdev_priv(dev);
local = iface->local;
+ /* Unregister all netdevs before freeing local data. */
+ list_for_each_safe(ptr, n, &local->hostap_interfaces) {
+ iface = list_entry(ptr, struct hostap_interface, list);
+ if (iface->type == HOSTAP_INTERFACE_MASTER) {
+ /* special handling for this interface below */
+ continue;
+ }
+ hostap_remove_interface(iface->dev, 0, 1);
+ }
+
+ unregister_netdev(local->dev);
+
flush_scheduled_work();
if (timer_pending(&local->crypt_deinit_timer))
@@ -3382,15 +3394,6 @@ static void prism2_free_local_data(struct net_device *dev)
prism2_download_free_data(local->dl_sec);
#endif /* PRISM2_DOWNLOAD_SUPPORT */
- list_for_each_safe(ptr, n, &local->hostap_interfaces) {
- iface = list_entry(ptr, struct hostap_interface, list);
- if (iface->type == HOSTAP_INTERFACE_MASTER) {
- /* special handling for this interface below */
- continue;
- }
- hostap_remove_interface(iface->dev, 0, 1);
- }
-
prism2_clear_set_tim_queue(local);
list_for_each_safe(ptr, n, &local->bss_list) {
@@ -3403,7 +3406,6 @@ static void prism2_free_local_data(struct net_device *dev)
kfree(local->last_scan_results);
kfree(local->generic_elem);
- unregister_netdev(local->dev);
free_netdev(local->dev);
}
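[Editor's note, illustrative only] The hostap_hw.c hunks move interface removal and unregister_netdev() to the top of prism2_free_local_data(), so no netdev handler can run against private data that is already being torn down. A minimal sketch of that ordering rule follows; the struct and field names are hypothetical, only unregister_netdev(), free_netdev() and kfree() are real kernel APIs.

    #include <linux/netdevice.h>
    #include <linux/slab.h>

    struct example_priv {                   /* hypothetical private data */
            struct net_device *dev;
            void *scan_results;
    };

    static void example_remove(struct example_priv *priv)
    {
            unregister_netdev(priv->dev);   /* step 1: stop all ndo_* callbacks */
            kfree(priv->scan_results);      /* step 2: free data those callbacks used */
            free_netdev(priv->dev);         /* step 3: release the net_device itself */
    }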
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index e720369a351..53f5246c40a 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -50,7 +50,8 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
#endif /* in_atomic */
if (update && prism2_update_comms_qual(dev) == 0)
- wstats->qual.updated = 7;
+ wstats->qual.updated = IW_QUAL_ALL_UPDATED |
+ IW_QUAL_DBM;
wstats->qual.qual = local->comms_qual;
wstats->qual.level = local->avg_signal;
@@ -59,7 +60,7 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
wstats->qual.qual = 0;
wstats->qual.level = 0;
wstats->qual.noise = 0;
- wstats->qual.updated = 0;
+ wstats->qual.updated = IW_QUAL_ALL_INVALID;
}
return wstats;
@@ -1827,13 +1828,6 @@ static char * __prism2_translate_scan(local_info_t *local,
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
- /* FIX:
- * I do not know how this is possible, but iwe_stream_add_event
- * seems to re-order memcpy execution so that len is set only
- * after copying.. Pre-setting len here "fixes" this, but real
- * problems should be solved (after which these iwe.len
- * settings could be removed from this function). */
- iwe.len = IW_EV_ADDR_LEN;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
IW_EV_ADDR_LEN);
@@ -1843,7 +1837,6 @@ static char * __prism2_translate_scan(local_info_t *local,
iwe.cmd = SIOCGIWESSID;
iwe.u.data.length = ssid_len;
iwe.u.data.flags = 1;
- iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ssid);
memset(&iwe, 0, sizeof(iwe));
@@ -1859,7 +1852,6 @@ static char * __prism2_translate_scan(local_info_t *local,
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
- iwe.len = IW_EV_UINT_LEN;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
IW_EV_UINT_LEN);
}
@@ -1877,7 +1869,6 @@ static char * __prism2_translate_scan(local_info_t *local,
if (chan > 0) {
iwe.u.freq.m = freq_list[le16_to_cpu(chan - 1)] * 100000;
iwe.u.freq.e = 1;
- iwe.len = IW_EV_FREQ_LEN;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
IW_EV_FREQ_LEN);
}
@@ -1894,7 +1885,10 @@ static char * __prism2_translate_scan(local_info_t *local,
iwe.u.qual.noise =
HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl));
}
- iwe.len = IW_EV_QUAL_LEN;
+ iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED
+ | IW_QUAL_NOISE_UPDATED
+ | IW_QUAL_QUAL_INVALID
+ | IW_QUAL_DBM;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
IW_EV_QUAL_LEN);
}
@@ -1906,7 +1900,6 @@ static char * __prism2_translate_scan(local_info_t *local,
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
- iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, "");
/* TODO: add SuppRates into BSS table */
@@ -1930,7 +1923,7 @@ static char * __prism2_translate_scan(local_info_t *local,
}
/* TODO: add BeaconInt,resp_rate,atim into BSS table */
- buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_KERNEL);
+ buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_ATOMIC);
if (buf && scan) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
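[Editor's note, illustrative only] The kmalloc() in __prism2_translate_scan() is switched from GFP_KERNEL to GFP_ATOMIC, presumably because the caller runs in atomic context (e.g. with a spinlock held), where a sleeping allocation is not allowed. A generic sketch of the pattern, with hypothetical function and parameter names:

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    /* Sketch only: inside a spinlocked section, GFP_KERNEL may sleep and is
     * therefore forbidden; GFP_ATOMIC never sleeps but may fail more often. */
    static char *copy_under_lock(spinlock_t *lock, const char *src, size_t len)
    {
            char *buf;

            spin_lock(lock);
            buf = kmalloc(len, GFP_ATOMIC);  /* GFP_KERNEL could sleep here */
            if (buf)
                    memcpy(buf, src, len);
            spin_unlock(lock);
            return buf;
    }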
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 025f8cdb556..da0c80fb941 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -59,11 +59,13 @@ static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
{
struct hostap_interface *iface;
+ struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
+ hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
@@ -74,12 +76,14 @@ static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
{
struct hostap_interface *iface;
+ struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
u8 v;
iface = netdev_priv(dev);
local = iface->local;
+ hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
v = readb(hw_priv->mem_start + a);
@@ -91,11 +95,13 @@ static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
{
struct hostap_interface *iface;
+ struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
+ hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
@@ -106,12 +112,14 @@ static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
{
struct hostap_interface *iface;
+ struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
u16 v;
iface = netdev_priv(dev);
local = iface->local;
+ hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
v = readw(hw_priv->mem_start + a);
@@ -277,8 +285,6 @@ static struct prism2_helper_functions prism2_pci_funcs =
{
.card_present = NULL,
.cor_sreset = prism2_pci_cor_sreset,
- .dev_open = NULL,
- .dev_close = NULL,
.genesis_reset = prism2_pci_genesis_reset,
.hw_type = HOSTAP_HW_PCI,
};
@@ -352,8 +358,6 @@ static int prism2_pci_probe(struct pci_dev *pdev,
return hostap_hw_ready(dev);
fail:
- kfree(hw_priv);
-
if (irq_registered && dev)
free_irq(dev->irq, dev);
@@ -364,10 +368,8 @@ static int prism2_pci_probe(struct pci_dev *pdev,
err_out_disable:
pci_disable_device(pdev);
- kfree(hw_priv);
- if (local)
- local->hw_priv = NULL;
prism2_free_local_data(dev);
+ kfree(hw_priv);
return -ENODEV;
}
@@ -392,9 +394,8 @@ static void prism2_pci_remove(struct pci_dev *pdev)
free_irq(dev->irq, dev);
mem_start = hw_priv->mem_start;
- kfree(hw_priv);
- iface->local->hw_priv = NULL;
prism2_free_local_data(dev);
+ kfree(hw_priv);
iounmap(mem_start);
@@ -441,7 +442,7 @@ static int prism2_pci_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
static struct pci_driver prism2_pci_drv_id = {
- .name = "prism2_pci",
+ .name = "hostap_pci",
.id_table = prism2_pci_id_table,
.probe = prism2_pci_probe,
.remove = prism2_pci_remove,
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 474ef83d813..78d67b408b2 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -328,8 +328,6 @@ static struct prism2_helper_functions prism2_plx_funcs =
{
.card_present = NULL,
.cor_sreset = prism2_plx_cor_sreset,
- .dev_open = NULL,
- .dev_close = NULL,
.genesis_reset = prism2_plx_genesis_reset,
.hw_type = HOSTAP_HW_PLX,
};
@@ -570,10 +568,8 @@ static int prism2_plx_probe(struct pci_dev *pdev,
return hostap_hw_ready(dev);
fail:
- kfree(hw_priv);
- if (local)
- local->hw_priv = NULL;
prism2_free_local_data(dev);
+ kfree(hw_priv);
if (irq_registered && dev)
free_irq(dev->irq, dev);
@@ -606,9 +602,8 @@ static void prism2_plx_remove(struct pci_dev *pdev)
if (dev->irq)
free_irq(dev->irq, dev);
- kfree(iface->local->hw_priv);
- iface->local->hw_priv = NULL;
prism2_free_local_data(dev);
+ kfree(hw_priv);
pci_disable_device(pdev);
}
@@ -616,7 +611,7 @@ static void prism2_plx_remove(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, prism2_plx_id_table);
static struct pci_driver prism2_plx_drv_id = {
- .name = "prism2_plx",
+ .name = "hostap_plx",
.id_table = prism2_plx_id_table,
.probe = prism2_plx_probe,
.remove = prism2_plx_remove,
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index cc061e1560d..cfd80155949 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -552,8 +552,6 @@ struct prism2_helper_functions {
* (hostap_{cs,plx,pci}.c */
int (*card_present)(local_info_t *local);
void (*cor_sreset)(local_info_t *local);
- int (*dev_open)(local_info_t *local);
- int (*dev_close)(local_info_t *local);
void (*genesis_reset)(local_info_t *local, int hcr);
/* the following functions are from hostap_hw.c, but they may have some
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 2414e6493aa..ad7f8cd76db 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -800,8 +800,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
* doesn't seem to have as many firmware restart cycles...
*
* As a test, we're sticking in a 1/100s delay here */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ / 100);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(10));
return 0;
@@ -1256,8 +1255,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
i = 5000;
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(40 * HZ / 1000);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(40));
/* Todo... wait for sync command ... */
read_register(priv->net_dev, IPW_REG_INTA, &inta);
@@ -1411,8 +1409,7 @@ static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
(val2 & IPW2100_COMMAND_PHY_OFF))
return 0;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HW_PHY_OFF_LOOP_DELAY);
+ schedule_timeout_uninterruptible(HW_PHY_OFF_LOOP_DELAY);
}
return -EIO;
@@ -1466,7 +1463,7 @@ fail_up:
static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
{
-#define HW_POWER_DOWN_DELAY (HZ / 10)
+#define HW_POWER_DOWN_DELAY (msecs_to_jiffies(100))
struct host_command cmd = {
.host_command = HOST_PRE_POWER_DOWN,
@@ -1520,10 +1517,8 @@ static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
printk(KERN_WARNING DRV_NAME ": "
"%s: Power down command failed: Error %d\n",
priv->net_dev->name, err);
- else {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HW_POWER_DOWN_DELAY);
- }
+ else
+ schedule_timeout_uninterruptible(HW_POWER_DOWN_DELAY);
}
priv->status &= ~STATUS_ENABLED;
@@ -2953,7 +2948,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
int next = txq->next;
int i = 0;
struct ipw2100_data_header *ipw_hdr;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_3addr *hdr;
while (!list_empty(&priv->tx_pend_list)) {
/* if there isn't enough space in TBD queue, then
@@ -2989,7 +2984,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
packet->index = txq->next;
ipw_hdr = packet->info.d_struct.data;
- hdr = (struct ieee80211_hdr *)packet->info.d_struct.txb->
+ hdr = (struct ieee80211_hdr_3addr *)packet->info.d_struct.txb->
fragments[0]->data;
if (priv->ieee->iw_mode == IW_MODE_INFRA) {
@@ -3274,7 +3269,8 @@ static irqreturn_t ipw2100_interrupt(int irq, void *data,
return IRQ_NONE;
}
-static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev)
+static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev,
+ int pri)
{
struct ipw2100_priv *priv = ieee80211_priv(dev);
struct list_head *element;
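[Editor's note, illustrative only] The ipw2100 delays above are converted from the open-coded set_current_state()/schedule_timeout() pair to schedule_timeout_uninterruptible(), with raw HZ arithmetic replaced by msecs_to_jiffies(). A hedged before/after sketch of the 10 ms case:

    #include <linux/sched.h>
    #include <linux/jiffies.h>

    static void sleep_10ms_old(void)
    {
            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule_timeout(HZ / 100);      /* only exact when HZ % 100 == 0 */
    }

    static void sleep_10ms_new(void)
    {
            /* sets TASK_UNINTERRUPTIBLE internally, then sleeps */
            schedule_timeout_uninterruptible(msecs_to_jiffies(10));
    }

The helper is shorter, cannot be misused with the wrong task state, and the millisecond conversion no longer depends on the configured HZ.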
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index 2a3cdbd5016..c9e99ce15d6 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -808,7 +808,7 @@ struct ipw2100_priv {
struct ipw2100_rx {
union {
unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH];
- struct ieee80211_hdr header;
+ struct ieee80211_hdr_4addr header;
u32 status;
struct ipw2100_notification notification;
struct ipw2100_cmd_header command;
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index b7f275c00de..de4e6c23e4b 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4904,7 +4904,7 @@ static void ipw_rx(struct ipw_priv *priv)
{
struct ipw_rx_mem_buffer *rxb;
struct ipw_rx_packet *pkt;
- struct ieee80211_hdr *header;
+ struct ieee80211_hdr_4addr *header;
u32 r, w, i;
u8 network_packet;
@@ -4967,8 +4967,9 @@ static void ipw_rx(struct ipw_priv *priv)
#endif
header =
- (struct ieee80211_hdr *)(rxb->skb->data +
- IPW_RX_FRAME_SIZE);
+ (struct ieee80211_hdr_4addr *)(rxb->skb->
+ data +
+ IPW_RX_FRAME_SIZE);
/* TODO: Check Ad-Hoc dest/source and make sure
* that we are actually parsing these packets
* correctly -- we should probably use the
@@ -5317,8 +5318,6 @@ static int ipw_wx_set_freq(struct net_device *dev,
IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
return ipw_set_channel(priv, (u8) fwrq->m);
-
- return 0;
}
static int ipw_wx_get_freq(struct net_device *dev,
@@ -6010,12 +6009,12 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
}
if (priv->adapter == IPW_2915ABG) {
- priv->ieee->abg_ture = 1;
+ priv->ieee->abg_true = 1;
if (mode & IEEE_A) {
band |= IEEE80211_52GHZ_BAND;
modulation |= IEEE80211_OFDM_MODULATION;
} else
- priv->ieee->abg_ture = 0;
+ priv->ieee->abg_true = 0;
} else {
if (mode & IEEE_A) {
IPW_WARNING("Attempt to set 2200BG into "
@@ -6023,20 +6022,20 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
return -EINVAL;
}
- priv->ieee->abg_ture = 0;
+ priv->ieee->abg_true = 0;
}
if (mode & IEEE_B) {
band |= IEEE80211_24GHZ_BAND;
modulation |= IEEE80211_CCK_MODULATION;
} else
- priv->ieee->abg_ture = 0;
+ priv->ieee->abg_true = 0;
if (mode & IEEE_G) {
band |= IEEE80211_24GHZ_BAND;
modulation |= IEEE80211_OFDM_MODULATION;
} else
- priv->ieee->abg_ture = 0;
+ priv->ieee->abg_true = 0;
priv->ieee->mode = mode;
priv->ieee->freq_band = band;
@@ -6325,7 +6324,7 @@ we need to heavily modify the ieee80211_skb_to_txb.
static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)
+ struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
txb->fragments[0]->data;
int i = 0;
struct tfd_frame *tfd;
@@ -6448,7 +6447,7 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
}
static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
- struct net_device *dev)
+ struct net_device *dev, int pri)
{
struct ipw_priv *priv = ieee80211_priv(dev);
unsigned long flags;
@@ -7108,7 +7107,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_INFO DRV_NAME
": Detected Intel PRO/Wireless 2915ABG Network "
"Connection\n");
- priv->ieee->abg_ture = 1;
+ priv->ieee->abg_true = 1;
band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
modulation = IEEE80211_OFDM_MODULATION |
IEEE80211_CCK_MODULATION;
@@ -7124,7 +7123,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
": Detected Intel PRO/Wireless 2200BG Network "
"Connection\n");
- priv->ieee->abg_ture = 0;
+ priv->ieee->abg_true = 0;
band = IEEE80211_24GHZ_BAND;
modulation = IEEE80211_OFDM_MODULATION |
IEEE80211_CCK_MODULATION;
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index 5b00882133f..e9cf32bf3e3 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1654,12 +1654,12 @@ static const long ipw_frequencies[] = {
#define IPW_MAX_CONFIG_RETRIES 10
-static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr)
+static inline u32 frame_hdr_len(struct ieee80211_hdr_4addr *hdr)
{
u32 retval;
u16 fc;
- retval = sizeof(struct ieee80211_hdr);
+ retval = sizeof(struct ieee80211_hdr_3addr);
fc = le16_to_cpu(hdr->frame_ctl);
/*
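[Editor's note, illustrative only] frame_hdr_len() now starts from sizeof(struct ieee80211_hdr_3addr) (24 bytes) instead of the renamed 4-address structure (30 bytes), since the fourth address field is present only when both ToDS and FromDS are set. A small self-contained sketch of that length computation; the frame-control bit values follow the 802.11 layout and are shown here as illustrative defines, and control frames (which use shorter headers) are ignored.

    #include <stdint.h>

    #define FCTL_TODS   0x0100   /* illustrative 802.11 frame-control bits */
    #define FCTL_FROMDS 0x0200

    /* Data/management frames: fc + dur + 3*addr + seq = 2+2+18+2 = 24 bytes;
     * a fourth address (6 bytes) is appended only for WDS frames. */
    static inline unsigned int hdr_len_80211(uint16_t frame_ctl)
    {
            unsigned int len = 24;

            if ((frame_ctl & (FCTL_TODS | FCTL_FROMDS)) ==
                (FCTL_TODS | FCTL_FROMDS))
                    len += 6;    /* addr4 present: 30-byte 4-address header */
            return len;
    }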
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index ca6c03c8992..92793b958e3 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -57,9 +57,7 @@
#include <linux/bitops.h>
#ifdef CONFIG_NET_RADIO
#include <linux/wireless.h>
-#if WIRELESS_EXT > 12
#include <net/iw_handler.h>
-#endif /* WIRELESS_EXT > 12 */
#endif
#include <pcmcia/cs_types.h>
@@ -225,10 +223,7 @@ static void update_stats(struct net_device *dev);
static struct net_device_stats *netwave_get_stats(struct net_device *dev);
/* Wireless extensions */
-#ifdef WIRELESS_EXT
static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev);
-#endif
-static int netwave_ioctl(struct net_device *, struct ifreq *, int);
static void set_multicast_list(struct net_device *dev);
@@ -260,26 +255,7 @@ static dev_link_t *dev_list;
because they generally can't be allocated dynamically.
*/
-#if WIRELESS_EXT <= 12
-/* Wireless extensions backward compatibility */
-
-/* Part of iw_handler prototype we need */
-struct iw_request_info
-{
- __u16 cmd; /* Wireless Extension command */
- __u16 flags; /* More to come ;-) */
-};
-
-/* Wireless Extension Backward compatibility - Jean II
- * If the new wireless device private ioctl range is not defined,
- * default to standard device private ioctl range */
-#ifndef SIOCIWFIRSTPRIV
-#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
-#endif /* SIOCIWFIRSTPRIV */
-
-#else /* WIRELESS_EXT <= 12 */
static const struct iw_handler_def netwave_handler_def;
-#endif /* WIRELESS_EXT <= 12 */
#define SIOCGIPSNAP SIOCIWFIRSTPRIV + 1 /* Site Survey Snapshot */
@@ -319,9 +295,7 @@ typedef struct netwave_private {
struct timer_list watchdog; /* To avoid blocking state */
struct site_survey nss;
struct net_device_stats stats;
-#ifdef WIRELESS_EXT
struct iw_statistics iw_stats; /* Wireless stats */
-#endif
} netwave_private;
#ifdef NETWAVE_STATS
@@ -353,7 +327,6 @@ static inline void wait_WOC(unsigned int iobase)
while ((inb(iobase + NETWAVE_REG_ASR) & 0x8) != 0x8) ;
}
-#ifdef WIRELESS_EXT
static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
kio_addr_t iobase) {
u_short resultBuffer;
@@ -376,9 +349,7 @@ static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
sizeof(struct site_survey));
}
}
-#endif
-#ifdef WIRELESS_EXT
/*
* Function netwave_get_wireless_stats (dev)
*
@@ -411,7 +382,6 @@ static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
return &priv->iw_stats;
}
-#endif
/*
* Function netwave_attach (void)
@@ -471,13 +441,7 @@ static dev_link_t *netwave_attach(void)
dev->get_stats = &netwave_get_stats;
dev->set_multicast_list = &set_multicast_list;
/* wireless extensions */
-#if WIRELESS_EXT <= 16
- dev->get_wireless_stats = &netwave_get_wireless_stats;
-#endif /* WIRELESS_EXT <= 16 */
-#if WIRELESS_EXT > 12
dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def;
-#endif /* WIRELESS_EXT > 12 */
- dev->do_ioctl = &netwave_ioctl;
dev->tx_timeout = &netwave_watchdog;
dev->watchdog_timeo = TX_TIMEOUT;
@@ -576,13 +540,8 @@ static int netwave_set_nwid(struct net_device *dev,
/* Disable interrupts & save flags */
spin_lock_irqsave(&priv->spinlock, flags);
-#if WIRELESS_EXT > 8
if(!wrqu->nwid.disabled) {
domain = wrqu->nwid.value;
-#else /* WIRELESS_EXT > 8 */
- if(wrqu->nwid.on) {
- domain = wrqu->nwid.nwid;
-#endif /* WIRELESS_EXT > 8 */
printk( KERN_DEBUG "Setting domain to 0x%x%02x\n",
(domain >> 8) & 0x01, domain & 0xff);
wait_WOC(iobase);
@@ -606,15 +565,9 @@ static int netwave_get_nwid(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
-#if WIRELESS_EXT > 8
wrqu->nwid.value = domain;
wrqu->nwid.disabled = 0;
wrqu->nwid.fixed = 1;
-#else /* WIRELESS_EXT > 8 */
- wrqu->nwid.nwid = domain;
- wrqu->nwid.on = 1;
-#endif /* WIRELESS_EXT > 8 */
-
return 0;
}
@@ -657,17 +610,11 @@ static int netwave_get_scramble(struct net_device *dev,
{
key[1] = scramble_key & 0xff;
key[0] = (scramble_key>>8) & 0xff;
-#if WIRELESS_EXT > 8
wrqu->encoding.flags = IW_ENCODE_ENABLED;
wrqu->encoding.length = 2;
-#else /* WIRELESS_EXT > 8 */
- wrqu->encoding.method = 1;
-#endif /* WIRELESS_EXT > 8 */
-
return 0;
}
-#if WIRELESS_EXT > 8
/*
* Wireless Handler : get mode
*/
@@ -683,7 +630,6 @@ static int netwave_get_mode(struct net_device *dev,
return 0;
}
-#endif /* WIRELESS_EXT > 8 */
/*
* Wireless Handler : get range info
@@ -702,11 +648,9 @@ static int netwave_get_range(struct net_device *dev,
/* Set all the info we don't care or don't know about to zero */
memset(range, 0, sizeof(struct iw_range));
-#if WIRELESS_EXT > 10
/* Set the Wireless Extension versions */
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 9; /* Nothing for us in v10 and v11 */
-#endif /* WIRELESS_EXT > 10 */
/* Set information in the range struct */
range->throughput = 450 * 1000; /* don't argue on this ! */
@@ -720,16 +664,12 @@ static int netwave_get_range(struct net_device *dev,
range->max_qual.level = 255;
range->max_qual.noise = 0;
-#if WIRELESS_EXT > 7
range->num_bitrates = 1;
range->bitrate[0] = 1000000; /* 1 Mb/s */
-#endif /* WIRELESS_EXT > 7 */
-#if WIRELESS_EXT > 8
range->encoding_size[0] = 2; /* 16 bits scrambling */
range->num_encoding_sizes = 1;
range->max_encoding_tokens = 1; /* Only one key possible */
-#endif /* WIRELESS_EXT > 8 */
return ret;
}
@@ -775,8 +715,6 @@ static const struct iw_priv_args netwave_private_args[] = {
"getsitesurvey" },
};
-#if WIRELESS_EXT > 12
-
static const iw_handler netwave_handler[] =
{
NULL, /* SIOCSIWNAME */
@@ -839,131 +777,8 @@ static const struct iw_handler_def netwave_handler_def =
.standard = (iw_handler *) netwave_handler,
.private = (iw_handler *) netwave_private_handler,
.private_args = (struct iw_priv_args *) netwave_private_args,
-#if WIRELESS_EXT > 16
.get_wireless_stats = netwave_get_wireless_stats,
-#endif /* WIRELESS_EXT > 16 */
};
-#endif /* WIRELESS_EXT > 12 */
-
-/*
- * Function netwave_ioctl (dev, rq, cmd)
- *
- * Perform ioctl : config & info stuff
- * This is the stuff that are treated the wireless extensions (iwconfig)
- *
- */
-static int netwave_ioctl(struct net_device *dev, /* ioctl device */
- struct ifreq *rq, /* Data passed */
- int cmd) /* Ioctl number */
-{
- int ret = 0;
-#ifdef WIRELESS_EXT
-#if WIRELESS_EXT <= 12
- struct iwreq *wrq = (struct iwreq *) rq;
-#endif
-#endif
-
- DEBUG(0, "%s: ->netwave_ioctl(cmd=0x%X)\n", dev->name, cmd);
-
- /* Look what is the request */
- switch(cmd) {
- /* --------------- WIRELESS EXTENSIONS --------------- */
-#ifdef WIRELESS_EXT
-#if WIRELESS_EXT <= 12
- case SIOCGIWNAME:
- netwave_get_name(dev, NULL, &(wrq->u), NULL);
- break;
- case SIOCSIWNWID:
- ret = netwave_set_nwid(dev, NULL, &(wrq->u), NULL);
- break;
- case SIOCGIWNWID:
- ret = netwave_get_nwid(dev, NULL, &(wrq->u), NULL);
- break;
-#if WIRELESS_EXT > 8 /* Note : The API did change... */
- case SIOCGIWENCODE:
- /* Get scramble key */
- if(wrq->u.encoding.pointer != (caddr_t) 0)
- {
- char key[2];
- ret = netwave_get_scramble(dev, NULL, &(wrq->u), key);
- if(copy_to_user(wrq->u.encoding.pointer, key, 2))
- ret = -EFAULT;
- }
- break;
- case SIOCSIWENCODE:
- /* Set scramble key */
- if(wrq->u.encoding.pointer != (caddr_t) 0)
- {
- char key[2];
- if(copy_from_user(key, wrq->u.encoding.pointer, 2))
- {
- ret = -EFAULT;
- break;
- }
- ret = netwave_set_scramble(dev, NULL, &(wrq->u), key);
- }
- break;
- case SIOCGIWMODE:
- /* Mode of operation */
- ret = netwave_get_mode(dev, NULL, &(wrq->u), NULL);
- break;
-#else /* WIRELESS_EXT > 8 */
- case SIOCGIWENCODE:
- /* Get scramble key */
- ret = netwave_get_scramble(dev, NULL, &(wrq->u),
- (char *) &wrq->u.encoding.code);
- break;
- case SIOCSIWENCODE:
- /* Set scramble key */
- ret = netwave_set_scramble(dev, NULL, &(wrq->u),
- (char *) &wrq->u.encoding.code);
- break;
-#endif /* WIRELESS_EXT > 8 */
- case SIOCGIWRANGE:
- /* Basic checking... */
- if(wrq->u.data.pointer != (caddr_t) 0) {
- struct iw_range range;
- ret = netwave_get_range(dev, NULL, &(wrq->u), (char *) &range);
- if (copy_to_user(wrq->u.data.pointer, &range,
- sizeof(struct iw_range)))
- ret = -EFAULT;
- }
- break;
- case SIOCGIWPRIV:
- /* Basic checking... */
- if(wrq->u.data.pointer != (caddr_t) 0) {
- /* Set the number of ioctl available */
- wrq->u.data.length = sizeof(netwave_private_args) / sizeof(netwave_private_args[0]);
-
- /* Copy structure to the user buffer */
- if(copy_to_user(wrq->u.data.pointer,
- (u_char *) netwave_private_args,
- sizeof(netwave_private_args)))
- ret = -EFAULT;
- }
- break;
- case SIOCGIPSNAP:
- if(wrq->u.data.pointer != (caddr_t) 0) {
- char buffer[sizeof( struct site_survey)];
- ret = netwave_get_snap(dev, NULL, &(wrq->u), buffer);
- /* Copy structure to the user buffer */
- if(copy_to_user(wrq->u.data.pointer,
- buffer,
- sizeof( struct site_survey)))
- {
- printk(KERN_DEBUG "Bad buffer!\n");
- break;
- }
- }
- break;
-#endif /* WIRELESS_EXT <= 12 */
-#endif /* WIRELESS_EXT */
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
/*
* Function netwave_pcmcia_config (link)
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 15ceaf61575..d3d4ec9e242 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -77,30 +77,16 @@
#define DRIVER_NAME "orinoco"
#include <linux/config.h>
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
#include <linux/netdevice.h>
-#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <net/ieee80211.h>
-#include <net/ieee80211.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-#include "hermes.h"
#include "hermes_rid.h"
#include "orinoco.h"
@@ -137,7 +123,7 @@ MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions");
/* We do this this way to avoid ifdefs in the actual code */
#ifdef WIRELESS_SPY
-#define SPY_NUMBER(priv) (priv->spy_number)
+#define SPY_NUMBER(priv) (priv->spy_data.spy_number)
#else
#define SPY_NUMBER(priv) 0
#endif /* WIRELESS_SPY */
@@ -216,31 +202,32 @@ static struct {
/********************************************************************/
/* Used in Event handling.
- * We avoid nested structres as they break on ARM -- Moustafa */
+ * We avoid nested structures as they break on ARM -- Moustafa */
struct hermes_tx_descriptor_802_11 {
/* hermes_tx_descriptor */
- u16 status;
- u16 reserved1;
- u16 reserved2;
- u32 sw_support;
+ __le16 status;
+ __le16 reserved1;
+ __le16 reserved2;
+ __le32 sw_support;
u8 retry_count;
u8 tx_rate;
- u16 tx_control;
+ __le16 tx_control;
- /* ieee802_11_hdr */
- u16 frame_ctl;
- u16 duration_id;
+ /* ieee80211_hdr */
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
+ __le16 seq_ctl;
u8 addr4[ETH_ALEN];
- u16 data_len;
+
+ __le16 data_len;
/* ethhdr */
- unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
- unsigned char h_source[ETH_ALEN]; /* source ether addr */
- unsigned short h_proto; /* packet type ID field */
+ u8 h_dest[ETH_ALEN]; /* destination eth addr */
+ u8 h_source[ETH_ALEN]; /* source ether addr */
+ __be16 h_proto; /* packet type ID field */
/* p8022_hdr */
u8 dsap;
@@ -248,31 +235,31 @@ struct hermes_tx_descriptor_802_11 {
u8 ctrl;
u8 oui[3];
- u16 ethertype;
+ __be16 ethertype;
} __attribute__ ((packed));
/* Rx frame header except compatibility 802.3 header */
struct hermes_rx_descriptor {
/* Control */
- u16 status;
- u32 time;
+ __le16 status;
+ __le32 time;
u8 silence;
u8 signal;
u8 rate;
u8 rxflow;
- u32 reserved;
+ __le32 reserved;
/* 802.11 header */
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
+ __le16 seq_ctl;
u8 addr4[ETH_ALEN];
/* Data length */
- u16 data_len;
+ __le16 data_len;
} __attribute__ ((packed));
/********************************************************************/
@@ -396,14 +383,14 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
/* If a spy address is defined, we report stats of the
* first spy address - Jean II */
if (SPY_NUMBER(priv)) {
- wstats->qual.qual = priv->spy_stat[0].qual;
- wstats->qual.level = priv->spy_stat[0].level;
- wstats->qual.noise = priv->spy_stat[0].noise;
- wstats->qual.updated = priv->spy_stat[0].updated;
+ wstats->qual.qual = priv->spy_data.spy_stat[0].qual;
+ wstats->qual.level = priv->spy_data.spy_stat[0].level;
+ wstats->qual.noise = priv->spy_data.spy_stat[0].noise;
+ wstats->qual.updated = priv->spy_data.spy_stat[0].updated;
}
} else {
struct {
- u16 qual, signal, noise;
+ __le16 qual, signal, noise;
} __attribute__ ((packed)) cq;
err = HERMES_READ_RECORD(hw, USER_BAP,
@@ -505,11 +492,9 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
/* Check packet length, pad short packets, round up odd length */
len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
- if (skb->len < len) {
- skb = skb_padto(skb, len);
- if (skb == NULL)
- goto fail;
- }
+ skb = skb_padto(skb, len);
+ if (skb == NULL)
+ goto fail;
len -= ETH_HLEN;
eh = (struct ethhdr *)skb->data;
@@ -634,16 +619,17 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
struct orinoco_private *priv = netdev_priv(dev);
struct net_device_stats *stats = &priv->stats;
u16 fid = hermes_read_regn(hw, TXCOMPLFID);
+ u16 status;
struct hermes_tx_descriptor_802_11 hdr;
int err = 0;
if (fid == DUMMY_FID)
return; /* Nothing's really happened */
- /* Read the frame header */
+ /* Read part of the frame header - we need status and addr1 */
err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
- sizeof(struct hermes_tx_descriptor) +
- sizeof(struct ieee80211_hdr),
+ offsetof(struct hermes_tx_descriptor_802_11,
+ addr2),
fid, 0);
hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
@@ -663,8 +649,8 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
* exceeded, because that's the only status that really mean
* that this particular node went away.
* Other errors means that *we* screwed up. - Jean II */
- hdr.status = le16_to_cpu(hdr.status);
- if (hdr.status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
+ status = le16_to_cpu(hdr.status);
+ if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
union iwreq_data wrqu;
/* Copy 802.11 dest address.
@@ -723,18 +709,13 @@ static inline int is_ethersnap(void *_hdr)
static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
int level, int noise)
{
- struct orinoco_private *priv = netdev_priv(dev);
- int i;
-
- /* Gather wireless spy statistics: for each packet, compare the
- * source address with out list, and if match, get the stats... */
- for (i = 0; i < priv->spy_number; i++)
- if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) {
- priv->spy_stat[i].level = level - 0x95;
- priv->spy_stat[i].noise = noise - 0x95;
- priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0;
- priv->spy_stat[i].updated = 7;
- }
+ struct iw_quality wstats;
+ wstats.level = level - 0x95;
+ wstats.noise = noise - 0x95;
+ wstats.qual = (level > noise) ? (level - noise) : 0;
+ wstats.updated = 7;
+ /* Update spy records */
+ wireless_spy_update(dev, mac, &wstats);
}
static void orinoco_stat_gather(struct net_device *dev,
@@ -1055,7 +1036,7 @@ static void orinoco_join_ap(struct net_device *dev)
unsigned long flags;
struct join_req {
u8 bssid[ETH_ALEN];
- u16 channel;
+ __le16 channel;
} __attribute__ ((packed)) req;
const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
struct prism2_scan_apinfo *atom = NULL;
@@ -1070,7 +1051,7 @@ static void orinoco_join_ap(struct net_device *dev)
return;
if (orinoco_lock(priv, &flags) != 0)
- goto out;
+ goto fail_lock;
/* Sanity checks in case user changed something in the meantime */
if (! priv->bssid_fixed)
@@ -1115,8 +1096,10 @@ static void orinoco_join_ap(struct net_device *dev)
printk(KERN_ERR "%s: Error issuing join request\n", dev->name);
out:
- kfree(buf);
orinoco_unlock(priv, &flags);
+
+ fail_lock:
+ kfree(buf);
}
/* Send new BSSID to userspace */
@@ -1134,12 +1117,14 @@ static void orinoco_send_wevents(struct net_device *dev)
err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID,
ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
if (err != 0)
- return;
+ goto out;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
/* Send event to user space */
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
+
+ out:
orinoco_unlock(priv, &flags);
}
@@ -1148,8 +1133,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
struct orinoco_private *priv = netdev_priv(dev);
u16 infofid;
struct {
- u16 len;
- u16 type;
+ __le16 len;
+ __le16 type;
} __attribute__ ((packed)) info;
int len, type;
int err;
@@ -2464,6 +2449,10 @@ struct net_device *alloc_orinocodev(int sizeof_card,
dev->get_stats = orinoco_get_stats;
dev->ethtool_ops = &orinoco_ethtool_ops;
dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def;
+#ifdef WIRELESS_SPY
+ priv->wireless_data.spy_data = &priv->spy_data;
+ dev->wireless_data = &priv->wireless_data;
+#endif
dev->change_mtu = orinoco_change_mtu;
dev->set_multicast_list = orinoco_set_multicast_list;
/* we use the default eth_mac_addr for setting the MAC addr */
@@ -2835,7 +2824,7 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
}
}
- if ((priv->iw_mode == IW_MODE_ADHOC) && (priv->spy_number == 0)){
+ if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){
/* Quality stats meaningless in ad-hoc mode */
} else {
range->max_qual.qual = 0x8b - 0x2f;
@@ -2882,6 +2871,14 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
range->min_r_time = 0;
range->max_r_time = 65535 * 1000; /* ??? */
+ /* Event capability (kernel) */
+ IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+ /* Event capability (driver) */
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+ IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
+
TRACE_EXIT(dev->name);
return 0;
@@ -3841,92 +3838,6 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
return err;
}
-/* Spy is used for link quality/strength measurements in Ad-Hoc mode
- * Jean II */
-static int orinoco_ioctl_setspy(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *srq,
- char *extra)
-
-{
- struct orinoco_private *priv = netdev_priv(dev);
- struct sockaddr *address = (struct sockaddr *) extra;
- int number = srq->length;
- int i;
- unsigned long flags;
-
- /* Make sure nobody mess with the structure while we do */
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- /* orinoco_lock() doesn't disable interrupts, so make sure the
- * interrupt rx path don't get confused while we copy */
- priv->spy_number = 0;
-
- if (number > 0) {
- /* Extract the addresses */
- for (i = 0; i < number; i++)
- memcpy(priv->spy_address[i], address[i].sa_data,
- ETH_ALEN);
- /* Reset stats */
- memset(priv->spy_stat, 0,
- sizeof(struct iw_quality) * IW_MAX_SPY);
- /* Set number of addresses */
- priv->spy_number = number;
- }
-
- /* Now, let the others play */
- orinoco_unlock(priv, &flags);
-
- /* Do NOT call commit handler */
- return 0;
-}
-
-static int orinoco_ioctl_getspy(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *srq,
- char *extra)
-{
- struct orinoco_private *priv = netdev_priv(dev);
- struct sockaddr *address = (struct sockaddr *) extra;
- int number;
- int i;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- number = priv->spy_number;
- /* Create address struct */
- for (i = 0; i < number; i++) {
- memcpy(address[i].sa_data, priv->spy_address[i], ETH_ALEN);
- address[i].sa_family = AF_UNIX;
- }
- if (number > 0) {
- /* Create address struct */
- for (i = 0; i < number; i++) {
- memcpy(address[i].sa_data, priv->spy_address[i],
- ETH_ALEN);
- address[i].sa_family = AF_UNIX;
- }
- /* Copy stats */
- /* In theory, we should disable irqs while copying the stats
- * because the rx path might update it in the middle...
- * Bah, who care ? - Jean II */
- memcpy(extra + (sizeof(struct sockaddr) * number),
- priv->spy_stat, sizeof(struct iw_quality) * number);
- }
- /* Reset updated flags. */
- for (i = 0; i < number; i++)
- priv->spy_stat[i].updated = 0;
-
- orinoco_unlock(priv, &flags);
-
- srq->length = number;
-
- return 0;
-}
-
/* Trigger a scan (look for other cells in the vicinity) */
static int orinoco_ioctl_setscan(struct net_device *dev,
struct iw_request_info *info,
@@ -3999,7 +3910,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
HERMES_HOSTSCAN_SYMBOL_BCAST);
break;
case FIRMWARE_TYPE_INTERSIL: {
- u16 req[3];
+ __le16 req[3];
req[0] = cpu_to_le16(0x3fff); /* All channels */
req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */
@@ -4073,7 +3984,7 @@ static inline int orinoco_translate_scan(struct net_device *dev,
case FIRMWARE_TYPE_INTERSIL:
offset = 4;
if (priv->has_hostscan) {
- atom_len = le16_to_cpup((u16 *)scan);
+ atom_len = le16_to_cpup((__le16 *)scan);
/* Sanity check for atom_len */
if (atom_len < sizeof(struct prism2_scan_apinfo)) {
printk(KERN_ERR "%s: Invalid atom_len in scan data: %d\n",
@@ -4357,8 +4268,10 @@ static const iw_handler orinoco_handler[] = {
[SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
[SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
[SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
- [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy,
- [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy,
+ [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
+ [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
+ [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
+ [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
[SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
[SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
[SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
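Editorial note on the orinoco.c hunks above: the driver-private spy bookkeeping is dropped in favour of the generic wireless-extensions helpers. The receive path feeds each measurement to wireless_spy_update(), and SIOCSIWSPY/SIOCGIWSPY are serviced by iw_handler_set_spy()/iw_handler_get_spy() once dev->wireless_data points at the driver's struct iw_spy_data. A minimal sketch of that wiring, assuming a hypothetical private struct (example_priv) with the same two fields used here:

/* Sketch only: mirrors the iwspy wiring in the patch above.
 * "example_priv", "example_setup_spy" and "example_rx_quality" are
 * invented names, not part of the driver. */
#include <linux/netdevice.h>
#include <net/iw_handler.h>

struct example_priv {
	struct iw_spy_data spy_data;          /* per-address quality records */
	struct iw_public_data wireless_data;  /* exported to the WE core */
};

static void example_setup_spy(struct net_device *dev, struct example_priv *priv)
{
	/* Lets the generic iw_handler_{set,get}_spy() handlers find our data */
	priv->wireless_data.spy_data = &priv->spy_data;
	dev->wireless_data = &priv->wireless_data;
}

static void example_rx_quality(struct net_device *dev, unsigned char *src_mac,
			       int level, int noise)
{
	struct iw_quality wstats;

	/* The orinoco code above first subtracts a hardware-specific
	 * 0x95 offset; raw values are used here for the sketch. */
	wstats.level = level;
	wstats.noise = noise;
	wstats.qual = (level > noise) ? (level - noise) : 0;
	wstats.updated = 7;
	/* Records the sample against src_mac if it is on the spy list */
	wireless_spy_update(dev, src_mac, &wstats);
}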
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index 2f213a7103f..7a17bb31fc8 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -7,12 +7,11 @@
#ifndef _ORINOCO_H
#define _ORINOCO_H
-#define DRIVER_VERSION "0.15rc2"
+#define DRIVER_VERSION "0.15rc3"
-#include <linux/types.h>
-#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
+#include <net/iw_handler.h>
#include <linux/version.h>
#include "hermes.h"
@@ -28,7 +27,7 @@
#define ORINOCO_MAX_KEYS 4
struct orinoco_key {
- u16 len; /* always stored as little-endian */
+ __le16 len; /* always stored as little-endian */
char data[ORINOCO_MAX_KEY_SIZE];
} __attribute__ ((packed));
@@ -36,14 +35,14 @@ struct header_struct {
/* 802.3 */
u8 dest[ETH_ALEN];
u8 src[ETH_ALEN];
- u16 len;
+ __be16 len;
/* 802.2 */
u8 dsap;
u8 ssap;
u8 ctrl;
/* SNAP */
u8 oui[3];
- u16 ethertype;
+ unsigned short ethertype;
} __attribute__ ((packed));
typedef enum {
@@ -112,9 +111,8 @@ struct orinoco_private {
u16 pm_on, pm_mcast, pm_period, pm_timeout;
u16 preamble;
#ifdef WIRELESS_SPY
- int spy_number;
- u_char spy_address[IW_MAX_SPY][ETH_ALEN];
- struct iw_quality spy_stat[IW_MAX_SPY];
+ struct iw_spy_data spy_data; /* iwspy support */
+ struct iw_public_data wireless_data;
#endif
/* Configuration dependent variables */
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index bedd7f9f23e..dc1128a0097 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -14,33 +14,16 @@
#define PFX DRIVER_NAME ": "
#include <linux/config.h>
-#ifdef __IN_PCMCIA_PACKAGE__
-#include <pcmcia/k_compat.h>
-#endif /* __IN_PCMCIA_PACKAGE__ */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-
+#include <linux/delay.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
#include "orinoco.h"
/********************************************************************/
@@ -97,17 +80,8 @@ static dev_link_t *dev_list; /* = NULL */
/* Function prototypes */
/********************************************************************/
-/* device methods */
-static int orinoco_cs_hard_reset(struct orinoco_private *priv);
-
-/* PCMCIA gumpf */
-static void orinoco_cs_config(dev_link_t * link);
-static void orinoco_cs_release(dev_link_t * link);
-static int orinoco_cs_event(event_t event, int priority,
- event_callback_args_t * args);
-
-static dev_link_t *orinoco_cs_attach(void);
-static void orinoco_cs_detach(dev_link_t *);
+static void orinoco_cs_release(dev_link_t *link);
+static void orinoco_cs_detach(dev_link_t *link);
/********************************************************************/
/* Device methods */
@@ -603,49 +577,85 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
"Pavel Roskin <proski@gnu.org>, et al)";
static struct pcmcia_device_id orinoco_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
- PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a),
- PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001),
- PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305),
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613),
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673),
- PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001),
- PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
- PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021),
- PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
+ PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
+ PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
+ PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
+ PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
+ PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
+ PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
+ PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
+ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
+ PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
+ PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
+ PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
+ PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
+ PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
+ PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
+ PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
+ PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
+ PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
- PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
+ PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
+ PCMCIA_DEVICE_PROD_ID123("AIRVAST", "IEEE 802.11b Wireless PCMCIA Card", "HFA3863", 0xea569531, 0x4bcb9645, 0x355cb092),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
+ PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
+ PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
+ PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
+ PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+ PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
+ PCMCIA_DEVICE_PROD_ID123("corega", "WL PCCL-11", "ISL37300P", 0x0a21501a, 0x59868926, 0xc9049a39),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
+ PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
+ PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
+ PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
+ PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
+ PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
+ PCMCIA_DEVICE_PROD_ID123("Intersil", "PRISM Freedom PCMCIA Adapter", "ISL37100P", 0x4b801a17, 0xf222ec2d, 0x630d52b2),
+ PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
+ PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
+ PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+ PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
+ PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
+ PCMCIA_DEVICE_PROD_ID123("PCMCIA", "11M WLAN Card v2.5", "ISL37300P", 0x281f1c5d, 0x6e440487, 0xc9049a39),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
+ PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
- PCMCIA_DEVICE_PROD_ID1("Symbol Technologies", 0x3f02b4d6),
+ PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
+ PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
+ PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39),
+ PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
@@ -656,8 +666,8 @@ static struct pcmcia_driver orinoco_driver = {
.name = DRIVER_NAME,
},
.attach = orinoco_cs_attach,
- .event = orinoco_cs_event,
.detach = orinoco_cs_detach,
+ .event = orinoco_cs_event,
.id_table = orinoco_cs_ids,
};
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index 86fa58e5cfa..d8afd51ff8a 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -40,29 +40,13 @@
#define PFX DRIVER_NAME ": "
#include <linux/config.h>
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/list.h>
+#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/fcntl.h>
-
#include <pcmcia/cisreg.h>
-#include "hermes.h"
#include "orinoco.h"
#define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */
@@ -108,7 +92,7 @@ static int nortel_pci_cor_reset(struct orinoco_private *priv)
return 0;
}
-int nortel_pci_hw_init(struct nortel_pci_card *card)
+static int nortel_pci_hw_init(struct nortel_pci_card *card)
{
int i;
u32 reg;
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 42e03438291..5362c214fc8 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -93,28 +93,12 @@
#define PFX DRIVER_NAME ": "
#include <linux/config.h>
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/list.h>
+#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/fcntl.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include "hermes.h"
#include "orinoco.h"
/* All the magic there is from wlan-ng */
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 7ab05b89fb3..210e7377654 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -117,29 +117,13 @@
#define PFX DRIVER_NAME ": "
#include <linux/config.h>
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/list.h>
+#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/fcntl.h>
-
#include <pcmcia/cisreg.h>
-#include "hermes.h"
#include "orinoco.h"
#define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index 85893f42445..5e68b702618 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -53,29 +53,13 @@
#define PFX DRIVER_NAME ": "
#include <linux/config.h>
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/list.h>
+#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/fcntl.h>
-
#include <pcmcia/cisreg.h>
-#include "hermes.h"
#include "orinoco.h"
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 9a8790e3580..5c1a1adf1ff 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -462,14 +462,12 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
/* txpower is supported in dBm's */
range->txpower_capa = IW_TXPOW_DBM;
-#if WIRELESS_EXT > 16
/* Event capability (kernel + driver) */
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
IW_EVENT_CAPA_MASK(SIOCGIWAP));
range->event_capa[1] = IW_EVENT_CAPA_K_1;
range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);
-#endif /* WIRELESS_EXT > 16 */
if (islpci_get_state(priv) < PRV_STATE_INIT)
return 0;
@@ -693,14 +691,13 @@ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info,
extra + dwrq->length,
&(bsslist->bsslist[i]),
noise);
-#if WIRELESS_EXT > 16
+
/* Check if there is space for one more entry */
if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
/* Ask user space to try again with a bigger buffer */
rvalue = -E2BIG;
break;
}
-#endif /* WIRELESS_EXT > 16 */
}
kfree(bsslist);
@@ -2727,12 +2724,7 @@ const struct iw_handler_def prism54_handler_def = {
.standard = (iw_handler *) prism54_handler,
.private = (iw_handler *) prism54_private_handler,
.private_args = (struct iw_priv_args *) prism54_private_args,
-#if WIRELESS_EXT > 16
.get_wireless_stats = prism54_get_wireless_stats,
-#endif /* WIRELESS_EXT > 16 */
-#if WIRELESS_EXT == 16
- .spy_offset = offsetof(islpci_private, spy_data),
-#endif /* WIRELESS_EXT == 16 */
};
/* For wpa_supplicant */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 6f13d4a8e2d..6c9584a9f28 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -439,8 +439,7 @@ prism54_bring_down(islpci_private *priv)
wmb();
/* wait a while for the device to reset */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(50*HZ/1000);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(50));
return 0;
}
@@ -491,8 +490,7 @@ islpci_reset_if(islpci_private *priv)
/* The software reset acknowledge needs about 220 msec here.
* Be conservative and wait for up to one second. */
- set_current_state(TASK_UNINTERRUPTIBLE);
- remaining = schedule_timeout(HZ);
+ remaining = schedule_timeout_uninterruptible(HZ);
if(remaining > 0) {
result = 0;
@@ -839,13 +837,9 @@ islpci_setup(struct pci_dev *pdev)
priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
priv->monitor_type : ARPHRD_ETHER;
-#if WIRELESS_EXT > 16
/* Add pointers to enable iwspy support. */
priv->wireless_data.spy_data = &priv->spy_data;
ndev->wireless_data = &priv->wireless_data;
-#else /* WIRELESS_EXT > 16 */
- ndev->get_wireless_stats = &prism54_get_wireless_stats;
-#endif /* WIRELESS_EXT > 16 */
/* save the start and end address of the PCI memory area */
ndev->mem_start = (unsigned long) priv->device_base;
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index 32a1019f1b3..efbed439795 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -100,9 +100,7 @@ typedef struct {
struct iw_spy_data spy_data; /* iwspy support */
-#if WIRELESS_EXT > 16
struct iw_public_data wireless_data;
-#endif /* WIRELESS_EXT > 16 */
int monitor_type; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_PRISM */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index b6f2e5a223b..4937a5ad4b2 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -455,7 +455,7 @@ islpci_mgt_transaction(struct net_device *ndev,
struct islpci_mgmtframe **recvframe)
{
islpci_private *priv = netdev_priv(ndev);
- const long wait_cycle_jiffies = (ISL38XX_WAIT_CYCLE * 10 * HZ) / 1000;
+ const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
int err;
DEFINE_WAIT(wait);
@@ -475,8 +475,7 @@ islpci_mgt_transaction(struct net_device *ndev,
int timeleft;
struct islpci_mgmtframe *frame;
- set_current_state(TASK_UNINTERRUPTIBLE);
- timeleft = schedule_timeout(wait_cycle_jiffies);
+ timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
frame = xchg(&priv->mgmt_received, NULL);
if (frame) {
if (frame->header->oid == oid) {
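Editorial note on the prism54 hunks above: the open-coded set_current_state() plus schedule_timeout() pairs become schedule_timeout_uninterruptible(), and delays are expressed through msecs_to_jiffies() instead of hand-written HZ arithmetic. Shown side by side as a sketch (helper names are invented):

/* Old and new ways to sleep roughly 50 ms in process context. */
#include <linux/sched.h>
#include <linux/jiffies.h>

static void sleep_50ms_old(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(50 * HZ / 1000);	/* integer division may round down */
}

static void sleep_50ms_new(void)
{
	/* msecs_to_jiffies() rounds up, so the sleep is never shorter
	 * than requested */
	schedule_timeout_uninterruptible(msecs_to_jiffies(50));
}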
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index e9c5ea0f553..70fd6fd8feb 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1649,28 +1649,28 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev)
*/
static const iw_handler ray_handler[] = {
- [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) ray_commit,
- [SIOCGIWNAME -SIOCIWFIRST] (iw_handler) ray_get_name,
- [SIOCSIWFREQ -SIOCIWFIRST] (iw_handler) ray_set_freq,
- [SIOCGIWFREQ -SIOCIWFIRST] (iw_handler) ray_get_freq,
- [SIOCSIWMODE -SIOCIWFIRST] (iw_handler) ray_set_mode,
- [SIOCGIWMODE -SIOCIWFIRST] (iw_handler) ray_get_mode,
- [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) ray_get_range,
+ [SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) ray_commit,
+ [SIOCGIWNAME -SIOCIWFIRST] = (iw_handler) ray_get_name,
+ [SIOCSIWFREQ -SIOCIWFIRST] = (iw_handler) ray_set_freq,
+ [SIOCGIWFREQ -SIOCIWFIRST] = (iw_handler) ray_get_freq,
+ [SIOCSIWMODE -SIOCIWFIRST] = (iw_handler) ray_set_mode,
+ [SIOCGIWMODE -SIOCIWFIRST] = (iw_handler) ray_get_mode,
+ [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) ray_get_range,
#ifdef WIRELESS_SPY
- [SIOCSIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_set_spy,
- [SIOCGIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_get_spy,
- [SIOCSIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_set_thrspy,
- [SIOCGIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_get_thrspy,
+ [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
+ [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
+ [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
+ [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
#endif /* WIRELESS_SPY */
- [SIOCGIWAP -SIOCIWFIRST] (iw_handler) ray_get_wap,
- [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) ray_set_essid,
- [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) ray_get_essid,
- [SIOCSIWRATE -SIOCIWFIRST] (iw_handler) ray_set_rate,
- [SIOCGIWRATE -SIOCIWFIRST] (iw_handler) ray_get_rate,
- [SIOCSIWRTS -SIOCIWFIRST] (iw_handler) ray_set_rts,
- [SIOCGIWRTS -SIOCIWFIRST] (iw_handler) ray_get_rts,
- [SIOCSIWFRAG -SIOCIWFIRST] (iw_handler) ray_set_frag,
- [SIOCGIWFRAG -SIOCIWFIRST] (iw_handler) ray_get_frag,
+ [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) ray_get_wap,
+ [SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) ray_set_essid,
+ [SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) ray_get_essid,
+ [SIOCSIWRATE -SIOCIWFIRST] = (iw_handler) ray_set_rate,
+ [SIOCGIWRATE -SIOCIWFIRST] = (iw_handler) ray_get_rate,
+ [SIOCSIWRTS -SIOCIWFIRST] = (iw_handler) ray_set_rts,
+ [SIOCGIWRTS -SIOCIWFIRST] = (iw_handler) ray_get_rts,
+ [SIOCSIWFRAG -SIOCIWFIRST] = (iw_handler) ray_set_frag,
+ [SIOCGIWFRAG -SIOCIWFIRST] = (iw_handler) ray_get_frag,
};
#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
@@ -1678,9 +1678,9 @@ static const iw_handler ray_handler[] = {
#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
static const iw_handler ray_private_handler[] = {
- [0] (iw_handler) ray_set_framing,
- [1] (iw_handler) ray_get_framing,
- [3] (iw_handler) ray_get_country,
+ [0] = (iw_handler) ray_set_framing,
+ [1] = (iw_handler) ray_get_framing,
+ [3] = (iw_handler) ray_get_country,
};
static const struct iw_priv_args ray_private_args[] = {
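Editorial note on the ray_cs hunks above: the change is purely syntactic. The old GNU array-initializer spelling "[index] value" becomes the standard C99 designated-initializer form "[index] = value", which newer compilers expect. A small stand-alone illustration (the handler names are invented):

/* Purely illustrative: C99 designated initializers for sparse handler
 * tables; names are hypothetical, not part of the driver. */
#include <stdio.h>

typedef int (*handler_t)(int arg);

static int do_set(int arg) { return -arg; }
static int do_get(int arg) { return arg; }

static const handler_t handlers[8] = {
	[0] = do_set,	/* old GNU spelling was "[0] do_set" */
	[3] = do_get,	/* unnamed slots (1, 2, 4..7) stay NULL */
};

int main(void)
{
	printf("%d %d\n", handlers[0](5), handlers[3](5));	/* prints: -5 5 */
	return 0;
}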
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 39c6cdf7f3f..b1bbc8e8e91 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -22,58 +22,23 @@
#define PFX DRIVER_NAME ": "
#include <linux/config.h>
-#ifdef __IN_PCMCIA_PACKAGE__
-#include <pcmcia/k_compat.h>
-#endif /* __IN_PCMCIA_PACKAGE__ */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-
+#include <linux/delay.h>
+#include <linux/firmware.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
#include "orinoco.h"
-/*
- * If SPECTRUM_FW_INCLUDED is defined, the firmware is hardcoded into
- * the driver. Use get_symbol_fw script to generate spectrum_fw.h and
- * copy it to the same directory as spectrum_cs.c.
- *
- * If SPECTRUM_FW_INCLUDED is not defined, the firmware is loaded at the
- * runtime using hotplug. Use the same get_symbol_fw script to generate
- * files symbol_sp24t_prim_fw symbol_sp24t_sec_fw, copy them to the
- * hotplug firmware directory (typically /usr/lib/hotplug/firmware) and
- * make sure that you have hotplug installed and enabled in the kernel.
- */
-/* #define SPECTRUM_FW_INCLUDED 1 */
-
-#ifdef SPECTRUM_FW_INCLUDED
-/* Header with the firmware */
-#include "spectrum_fw.h"
-#else /* !SPECTRUM_FW_INCLUDED */
-#include <linux/firmware.h>
static unsigned char *primsym;
static unsigned char *secsym;
static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
-#endif /* !SPECTRUM_FW_INCLUDED */
/********************************************************************/
/* Module stuff */
@@ -124,17 +89,8 @@ static dev_link_t *dev_list; /* = NULL */
/* Function prototypes */
/********************************************************************/
-/* device methods */
-static int spectrum_cs_hard_reset(struct orinoco_private *priv);
-
-/* PCMCIA gumpf */
-static void spectrum_cs_config(dev_link_t * link);
-static void spectrum_cs_release(dev_link_t * link);
-static int spectrum_cs_event(event_t event, int priority,
- event_callback_args_t * args);
-
-static dev_link_t *spectrum_cs_attach(void);
-static void spectrum_cs_detach(dev_link_t *);
+static void spectrum_cs_release(dev_link_t *link);
+static void spectrum_cs_detach(dev_link_t *link);
/********************************************************************/
/* Firmware downloader */
@@ -182,8 +138,8 @@ static void spectrum_cs_detach(dev_link_t *);
* Each block has the following structure.
*/
struct dblock {
- u32 _addr; /* adapter address where to write the block */
- u16 _len; /* length of the data only, in bytes */
+ __le32 _addr; /* adapter address where to write the block */
+ __le16 _len; /* length of the data only, in bytes */
char data[0]; /* data to be written */
} __attribute__ ((packed));
@@ -193,9 +149,9 @@ struct dblock {
* items with matching ID should be written.
*/
struct pdr {
- u32 _id; /* record ID */
- u32 _addr; /* adapter address where to write the data */
- u32 _len; /* expected length of the data, in bytes */
+ __le32 _id; /* record ID */
+ __le32 _addr; /* adapter address where to write the data */
+ __le32 _len; /* expected length of the data, in bytes */
char next[0]; /* next PDR starts here */
} __attribute__ ((packed));
@@ -206,8 +162,8 @@ struct pdr {
* be plugged into the secondary firmware.
*/
struct pdi {
- u16 _len; /* length of ID and data, in words */
- u16 _id; /* record ID */
+ __le16 _len; /* length of ID and data, in words */
+ __le16 _id; /* record ID */
char data[0]; /* plug data */
} __attribute__ ((packed));
@@ -414,7 +370,7 @@ spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
/* Read PDA from the adapter */
static int
-spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len)
+spectrum_read_pda(hermes_t *hw, __le16 *pda, int pda_len)
{
int ret;
int pda_size;
@@ -445,7 +401,7 @@ spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len)
/* Parse PDA and write the records into the adapter */
static int
spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block,
- u16 *pda)
+ __le16 *pda)
{
int ret;
struct pdi *pdi;
@@ -511,7 +467,7 @@ spectrum_dl_image(hermes_t *hw, dev_link_t *link,
const struct dblock *first_block;
/* Plug Data Area (PDA) */
- u16 pda[PDA_WORDS];
+ __le16 pda[PDA_WORDS];
/* Binary block begins after the 0x1A marker */
ptr = image;
@@ -571,8 +527,6 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
{
int ret;
client_handle_t handle = link->handle;
-
-#ifndef SPECTRUM_FW_INCLUDED
const struct firmware *fw_entry;
if (request_firmware(&fw_entry, primary_fw_name,
@@ -592,7 +546,6 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
secondary_fw_name);
return -ENOENT;
}
-#endif
/* Load primary firmware */
ret = spectrum_dl_image(hw, link, primsym);
@@ -1085,7 +1038,7 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
static struct pcmcia_device_id spectrum_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */
PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */
- PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0001), /* Intel PRO/Wireless 2011B */
+ PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids);
@@ -1096,8 +1049,8 @@ static struct pcmcia_driver orinoco_driver = {
.name = DRIVER_NAME,
},
.attach = spectrum_cs_attach,
- .event = spectrum_cs_event,
.detach = spectrum_cs_detach,
+ .event = spectrum_cs_event,
.id_table = spectrum_cs_ids,
};
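Editorial note on the spectrum_cs hunks above: the firmware download structures now use the sparse-checkable __le16/__le32 types, so every field access must go through the byte-order helpers. A hedged sketch of walking such a block list under that convention; the struct layout comes from the patch, but the walk and its zero-length end condition are illustrative only (the real driver stops on its own end-marker address):

/* Illustrative only: iterates download blocks of the shape defined above,
 * converting the little-endian fields on each access. */
#include <linux/kernel.h>
#include <asm/byteorder.h>

struct dblock {
	__le32 _addr;	/* adapter address where to write the block */
	__le16 _len;	/* length of the data only, in bytes */
	char data[0];
} __attribute__ ((packed));

static void walk_dblocks(const struct dblock *blk)
{
	/* Zero length terminates this sketch; the driver's real end
	 * marker is not reproduced here. */
	while (le16_to_cpu(blk->_len) != 0) {
		u32 addr = le32_to_cpu(blk->_addr);
		u16 len = le16_to_cpu(blk->_len);

		/* ... write "len" bytes of blk->data to adapter address "addr" ... */
		blk = (const struct dblock *)(blk->data + len);
	}
}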
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 7a5e20a1789..b0d8b5b0315 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -430,7 +430,6 @@ static void fee_read(unsigned long ioaddr, /* I/O port of the card */
}
}
-#ifdef WIRELESS_EXT /* if the wireless extension exists in the kernel */
/*------------------------------------------------------------------*/
/*
@@ -514,7 +513,6 @@ static void fee_write(unsigned long ioaddr, /* I/O port of the card */
fee_wait(ioaddr, 10, 100);
#endif /* EEPROM_IS_PROTECTED */
}
-#endif /* WIRELESS_EXT */
/************************ I82586 SUBROUTINES *************************/
/*
@@ -973,11 +971,9 @@ static void wv_mmc_show(struct net_device * dev)
mmc_read(ioaddr, 0, (u8 *) & m, sizeof(m));
mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
-#ifdef WIRELESS_EXT /* if wireless extension exists in the kernel */
/* Don't forget to update statistics */
lp->wstats.discard.nwid +=
(m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
-#endif /* WIRELESS_EXT */
printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n");
#ifdef DEBUG_SHOW_UNUSED
@@ -1499,7 +1495,6 @@ static int wavelan_set_mac_address(struct net_device * dev, void *addr)
}
#endif /* SET_MAC_ADDRESS */
-#ifdef WIRELESS_EXT /* if wireless extensions exist in the kernel */
/*------------------------------------------------------------------*/
/*
@@ -2473,7 +2468,6 @@ static iw_stats *wavelan_get_wireless_stats(struct net_device * dev)
#endif
return &lp->wstats;
}
-#endif /* WIRELESS_EXT */
/************************* PACKET RECEPTION *************************/
/*
@@ -4194,11 +4188,9 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
dev->set_mac_address = &wavelan_set_mac_address;
#endif /* SET_MAC_ADDRESS */
-#ifdef WIRELESS_EXT /* if wireless extension exists in the kernel */
dev->wireless_handlers = &wavelan_handler_def;
lp->wireless_data.spy_data = &lp->spy_data;
dev->wireless_data = &lp->wireless_data;
-#endif
dev->mtu = WAVELAN_MTU;
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h
index 509ff22a6ca..166e28b9a4f 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/net/wireless/wavelan.p.h
@@ -409,11 +409,9 @@
#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical). */
#undef SET_MAC_ADDRESS /* Experimental */
-#ifdef WIRELESS_EXT /* If wireless extensions exist in the kernel */
/* Warning: this stuff will slow down the driver. */
#define WIRELESS_SPY /* Enable spying addresses. */
#undef HISTOGRAM /* Enable histogram of signal level. */
-#endif
/****************************** DEBUG ******************************/
@@ -506,12 +504,10 @@ struct net_local
u_short tx_first_free;
u_short tx_first_in_use;
-#ifdef WIRELESS_EXT
iw_stats wstats; /* Wireless-specific statistics */
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
-#endif
#ifdef HISTOGRAM
int his_number; /* number of intervals */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 183c4732ef6..4b3c98f5c56 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -415,7 +415,6 @@ fee_read(u_long base, /* i/o port of the card */
}
}
-#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
/*------------------------------------------------------------------*/
/*
@@ -500,7 +499,6 @@ fee_write(u_long base, /* i/o port of the card */
fee_wait(base, 10, 100);
#endif /* EEPROM_IS_PROTECTED */
}
-#endif /* WIRELESS_EXT */
/******************* WaveLAN Roaming routines... ********************/
@@ -1161,10 +1159,8 @@ wv_mmc_show(struct net_device * dev)
mmc_read(base, 0, (u_char *)&m, sizeof(m));
mmc_out(base, mmwoff(0, mmw_freeze), 0);
-#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
/* Don't forget to update statistics */
lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
-#endif /* WIRELESS_EXT */
spin_unlock_irqrestore(&lp->spinlock, flags);
@@ -1550,7 +1546,6 @@ wavelan_set_mac_address(struct net_device * dev,
}
#endif /* SET_MAC_ADDRESS */
-#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
/*------------------------------------------------------------------*/
/*
@@ -2793,7 +2788,6 @@ wavelan_get_wireless_stats(struct net_device * dev)
#endif
return &lp->wstats;
}
-#endif /* WIRELESS_EXT */
/************************* PACKET RECEPTION *************************/
/*
@@ -4679,11 +4673,9 @@ wavelan_attach(void)
dev->watchdog_timeo = WATCHDOG_JIFFIES;
SET_ETHTOOL_OPS(dev, &ops);
-#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
dev->wireless_handlers = &wavelan_handler_def;
lp->wireless_data.spy_data = &lp->spy_data;
dev->wireless_data = &lp->wireless_data;
-#endif
/* Other specific data */
dev->mtu = WAVELAN_MTU;
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 01d882be879..724a715089c 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -472,11 +472,9 @@
#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */
#undef SET_MAC_ADDRESS /* Experimental */
-#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
/* Warning: this stuff will slow down the driver... */
#define WIRELESS_SPY /* Enable spying addresses */
#undef HISTOGRAM /* Enable histogram of sig level... */
-#endif
/****************************** DEBUG ******************************/
@@ -624,12 +622,10 @@ struct net_local
int rfp; /* Last DMA machine receive pointer */
int overrunning; /* Receiver overrun flag */
-#ifdef WIRELESS_EXT
iw_stats wstats; /* Wireless specific stats */
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
-#endif
#ifdef HISTOGRAM
int his_number; /* Number of intervals */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 7fcbe589c3f..4303c50c2ab 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -548,7 +548,7 @@ struct wl3501_80211_tx_plcp_hdr {
struct wl3501_80211_tx_hdr {
struct wl3501_80211_tx_plcp_hdr pclp_hdr;
- struct ieee80211_hdr mac_hdr;
+ struct ieee80211_hdr_4addr mac_hdr;
} __attribute__ ((packed));
/*
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 0e98a9d9834..a3bd91a6182 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -836,7 +836,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
* This function implements the pci_alloc_consistent function.
*/
static void *
-ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
+ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
void *ret;
#if 0
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 82ea68b55df..bd8b3e5a5cd 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -986,7 +986,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
* See Documentation/DMA-mapping.txt
*/
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, int gfp)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a6a630a950d..7992bc8cc6a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -241,7 +241,8 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M );
-static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr)
+static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
+ unsigned size, int nr, const char *name)
{
region &= ~(size-1);
if (region) {
@@ -259,6 +260,7 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsi
pcibios_bus_to_resource(dev, res, &bus_region);
pci_claim_resource(dev, nr);
+ printk("PCI quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name);
}
}
@@ -291,25 +293,98 @@ static void __devinit quirk_ali7101_acpi(struct pci_dev *dev)
u16 region;
pci_read_config_word(dev, 0xE0, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES);
+ quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
pci_read_config_word(dev, 0xE2, &region);
- quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1);
+ quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi );
+static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
+{
+ u32 devres;
+ u32 mask, size, base;
+
+ pci_read_config_dword(dev, port, &devres);
+ if ((devres & enable) != enable)
+ return;
+ mask = (devres >> 16) & 15;
+ base = devres & 0xffff;
+ size = 16;
+ for (;;) {
+ unsigned bit = size >> 1;
+ if ((bit & mask) == bit)
+ break;
+ size = bit;
+ }
+ /*
+ * For now we only print it out. Eventually we'll want to
+ * reserve it (at least if it's in the 0x1000+ range), but
+ * let's get enough confirmation reports first.
+ */
+ base &= -size;
+ printk("%s PIO at %04x-%04x\n", name, base, base + size - 1);
+}
+
+static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
+{
+ u32 devres;
+ u32 mask, size, base;
+
+ pci_read_config_dword(dev, port, &devres);
+ if ((devres & enable) != enable)
+ return;
+ base = devres & 0xffff0000;
+ mask = (devres & 0x3f) << 16;
+ size = 128 << 16;
+ for (;;) {
+ unsigned bit = size >> 1;
+ if ((bit & mask) == bit)
+ break;
+ size = bit;
+ }
+ /*
+ * For now we only print it out. Eventually we'll want to
+ * reserve it, but let's get enough confirmation reports first.
+ */
+ base &= -size;
+ printk("%s MMIO at %04x-%04x\n", name, base, base + size - 1);
+}
+
/*
* PIIX4 ACPI: Two IO regions pointed to by longwords at
* 0x40 (64 bytes of ACPI registers)
* 0x90 (32 bytes of SMB registers)
+ * and a few strange programmable PIIX4 device resources.
*/
static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
{
- u32 region;
+ u32 region, res_a;
pci_read_config_dword(dev, 0x40, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES);
+ quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
pci_read_config_dword(dev, 0x90, &region);
- quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1);
+ quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
+
+ /* Device resource A has enables for some of the other ones */
+ pci_read_config_dword(dev, 0x5c, &res_a);
+
+ piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
+ piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
+
+ /* Device resource D is just bitfields for static resources */
+
+ /* Device 12 enabled? */
+ if (res_a & (1 << 29)) {
+ piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
+ piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
+ }
+ /* Device 13 enabled? */
+ if (res_a & (1 << 30)) {
+ piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
+ piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
+ }
+ piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
+ piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi );
@@ -323,10 +398,10 @@ static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev)
u32 region;
pci_read_config_dword(dev, 0x40, &region);
- quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES);
+ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO");
pci_read_config_dword(dev, 0x58, &region);
- quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1);
+ quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi );
@@ -352,7 +427,7 @@ static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev)
if (rev & 0x10) {
pci_read_config_dword(dev, 0x48, &region);
region &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES);
+ quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi );
@@ -372,11 +447,11 @@ static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev)
pci_read_config_word(dev, 0x70, &hm);
hm &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1);
+ quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon");
pci_read_config_dword(dev, 0x90, &smb);
smb &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2);
+ quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi );
@@ -391,11 +466,11 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
pci_read_config_word(dev, 0x88, &pm);
pm &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES);
+ quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
pci_read_config_word(dev, 0xd0, &smb);
smb &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1);
+ quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
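Editorial note on the PIIX4 hunks above: the new piix4_io_quirk()/piix4_mem_quirk() helpers derive the region size from a small mask by halving the candidate size until the bit just below it is present in the mask, which leaves the size equal to twice the highest set mask bit (or 1 for an empty mask), after which base is aligned down with base &= -size. A plain user-space check of that arithmetic, separate from the kernel code:

/* Demonstrates the size-decode loop used by piix4_io_quirk() above. */
#include <stdio.h>

static unsigned decode_size(unsigned mask, unsigned start)
{
	unsigned size = start;

	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	return size;
}

int main(void)
{
	/* The I/O decode starts at 16 bytes, the MMIO decode at 128 << 16 */
	printf("mask 0x0 -> %u bytes\n", decode_size(0x0, 16));	/* 1  */
	printf("mask 0x3 -> %u bytes\n", decode_size(0x3, 16));	/* 4  */
	printf("mask 0xf -> %u bytes\n", decode_size(0xf, 16));	/* 16 */
	return 0;
}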
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 657be948baf..28ce3a7ee43 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -40,7 +40,7 @@
* FIXME: IO should be max 256 bytes. However, since we may
* have a P2P bridge below a cardbus bridge, we need 4K.
*/
-#define CARDBUS_IO_SIZE (4*1024)
+#define CARDBUS_IO_SIZE (256)
#define CARDBUS_MEM_SIZE (32*1024*1024)
static void __devinit
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index fa09440d82e..38f50b7129a 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -16,7 +16,7 @@ MODULE_LICENSE("GPL");
fsm_instance *
init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
- int nr_events, const fsm_node *tmpl, int tmpl_len, int order)
+ int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
{
int i;
fsm_instance *this;
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index f9a011001eb..1b8a7e7c34f 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -110,7 +110,7 @@ extern fsm_instance *
init_fsm(char *name, const char **state_names,
const char **event_names,
int nr_states, int nr_events, const fsm_node *tmpl,
- int tmpl_len, int order);
+ int tmpl_len, gfp_t order);
/**
* Releases an FSM
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 9963479ba89..38a2441564d 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -275,6 +275,10 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
QETH_MAX_QUEUES,0x103}, \
+ {0x1731,0x06,0x1732,0x06,QETH_CARD_TYPE_OSN,0, \
+ QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
+ QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
+ QETH_MAX_QUEUES,0}, \
{0,0,0,0,0,0,0,0,0}}
#define QETH_REAL_CARD 1
@@ -363,10 +367,22 @@ struct qeth_hdr_layer2 {
__u8 reserved2[16];
} __attribute__ ((packed));
+struct qeth_hdr_osn {
+ __u8 id;
+ __u8 reserved;
+ __u16 seq_no;
+ __u16 reserved2;
+ __u16 control_flags;
+ __u16 pdu_length;
+ __u8 reserved3[18];
+ __u32 ccid;
+} __attribute__ ((packed));
+
struct qeth_hdr {
union {
struct qeth_hdr_layer2 l2;
struct qeth_hdr_layer3 l3;
+ struct qeth_hdr_osn osn;
} hdr;
} __attribute__ ((packed));
@@ -413,6 +429,7 @@ enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
QETH_HEADER_TYPE_TSO = 0x03,
+ QETH_HEADER_TYPE_OSN = 0x04,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
@@ -582,7 +599,6 @@ enum qeth_card_states {
* Protocol versions
*/
enum qeth_prot_versions {
- QETH_PROT_SNA = 0x0001,
QETH_PROT_IPV4 = 0x0004,
QETH_PROT_IPV6 = 0x0006,
};
@@ -761,6 +777,11 @@ enum qeth_threads {
QETH_RECOVER_THREAD = 2,
};
+struct qeth_osn_info {
+ int (*assist_cb)(struct net_device *dev, void *data);
+ int (*data_cb)(struct sk_buff *skb);
+};
+
struct qeth_card {
struct list_head list;
enum qeth_card_states state;
@@ -803,6 +824,7 @@ struct qeth_card {
int use_hard_stop;
int (*orig_hard_header)(struct sk_buff *,struct net_device *,
unsigned short,void *,void *,unsigned);
+ struct qeth_osn_info osn_info;
};
struct qeth_card_list_struct {
@@ -916,10 +938,12 @@ qeth_get_hlen(__u8 link_type)
static inline unsigned short
qeth_get_netdev_flags(struct qeth_card *card)
{
- if (card->options.layer2)
+ if (card->options.layer2 &&
+ (card->info.type == QETH_CARD_TYPE_OSAE))
return 0;
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
+ case QETH_CARD_TYPE_OSN:
return IFF_NOARP;
#ifdef CONFIG_QETH_IPV6
default:
@@ -956,9 +980,10 @@ static inline int
qeth_get_max_mtu_for_card(int cardtype)
{
switch (cardtype) {
+
case QETH_CARD_TYPE_UNKNOWN:
- return 61440;
case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSN:
return 61440;
case QETH_CARD_TYPE_IQD:
return 57344;
@@ -1004,6 +1029,7 @@ qeth_mtu_is_valid(struct qeth_card * card, int mtu)
case QETH_CARD_TYPE_IQD:
return ((mtu >= 576) &&
(mtu <= card->info.max_mtu + 4096 - 32));
+ case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_UNKNOWN:
default:
return 1;
@@ -1015,6 +1041,7 @@ qeth_get_arphdr_type(int cardtype, int linktype)
{
switch (cardtype) {
case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSN:
switch (linktype) {
case QETH_LINK_TYPE_LANE_TR:
case QETH_LINK_TYPE_HSTR:
@@ -1182,4 +1209,16 @@ qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
extern void
qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int);
+extern int
+qeth_osn_assist(struct net_device *, void *, int);
+
+extern int
+qeth_osn_register(unsigned char *read_dev_no,
+ struct net_device **,
+ int (*assist_cb)(struct net_device *, void *),
+ int (*data_cb)(struct sk_buff *));
+
+extern void
+qeth_osn_deregister(struct net_device *);
+
#endif /* __QETH_H__ */
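Editorial note on the qeth.h additions above: the header gains an OSN card type together with a small callback interface (qeth_osn_register() and struct qeth_osn_info). A hedged sketch of how a client might hook into it; the callback bodies, function names and device number below are invented for illustration and only the prototype is taken from the header:

/* Sketch of a hypothetical OSN client against the interface declared in
 * qeth.h above. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

extern int qeth_osn_register(unsigned char *read_dev_no,
			     struct net_device **dev,
			     int (*assist_cb)(struct net_device *, void *),
			     int (*data_cb)(struct sk_buff *));

static int my_assist_cb(struct net_device *dev, void *data)
{
	/* "data" is the IPA command that qeth_send_control_data_cb()
	 * passes up for OSN cards (see the qeth_main.c hunk below) */
	return 0;
}

static int my_data_cb(struct sk_buff *skb)
{
	dev_kfree_skb(skb);	/* consume the OSN data frame */
	return 0;
}

static int my_osn_attach(void)
{
	struct net_device *ndev;
	/* The layout of read_dev_no is not shown in this patch; the
	 * two-byte value here is invented. */
	unsigned char devno[2] = { 0x12, 0x34 };

	return qeth_osn_register(devno, &ndev, my_assist_cb, my_data_cb);
}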
diff --git a/drivers/s390/net/qeth_fs.h b/drivers/s390/net/qeth_fs.h
index 5c9a51ce91b..c0b4c8d82c4 100644
--- a/drivers/s390/net/qeth_fs.h
+++ b/drivers/s390/net/qeth_fs.h
@@ -12,7 +12,7 @@
#ifndef __QETH_FS_H__
#define __QETH_FS_H__
-#define VERSION_QETH_FS_H "$Revision: 1.9 $"
+#define VERSION_QETH_FS_H "$Revision: 1.10 $"
extern const char *VERSION_QETH_PROC_C;
extern const char *VERSION_QETH_SYS_C;
@@ -43,6 +43,12 @@ extern void
qeth_remove_device_attributes(struct device *dev);
extern int
+qeth_create_device_attributes_osn(struct device *dev);
+
+extern void
+qeth_remove_device_attributes_osn(struct device *dev);
+
+extern int
qeth_create_driver_attributes(void);
extern void
@@ -108,6 +114,8 @@ qeth_get_cardname(struct qeth_card *card)
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
+ case QETH_CARD_TYPE_OSN:
+ return " OSN QDIO";
default:
return " unknown";
}
@@ -153,6 +161,8 @@ qeth_get_cardname_short(struct qeth_card *card)
}
case QETH_CARD_TYPE_IQD:
return "HiperSockets";
+ case QETH_CARD_TYPE_OSN:
+ return "OSN";
default:
return "unknown";
}
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index bd28e2438d7..692003c9f89 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -196,7 +196,6 @@ qeth_notifier_register(struct task_struct *p, int signum)
{
struct qeth_notify_list_struct *n_entry;
-
/*check first if entry already exists*/
spin_lock(&qeth_notify_lock);
list_for_each_entry(n_entry, &qeth_notify_list, list) {
@@ -1024,7 +1023,10 @@ qeth_set_intial_options(struct qeth_card *card)
card->options.fake_broadcast = 0;
card->options.add_hhlen = DEFAULT_ADD_HHLEN;
card->options.fake_ll = 0;
- card->options.layer2 = 0;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ card->options.layer2 = 1;
+ else
+ card->options.layer2 = 0;
}
/**
@@ -1113,19 +1115,20 @@ qeth_determine_card_type(struct qeth_card *card)
QETH_DBF_TEXT(setup, 2, "detcdtyp");
+ card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
+ card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
while (known_devices[i][4]) {
if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
(CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
card->info.type = known_devices[i][4];
+ card->qdio.no_out_queues = known_devices[i][8];
+ card->info.is_multicast_different = known_devices[i][9];
if (is_1920_device(card)) {
PRINT_INFO("Priority Queueing not able "
"due to hardware limitations!\n");
card->qdio.no_out_queues = 1;
card->qdio.default_out_queue = 0;
- } else {
- card->qdio.no_out_queues = known_devices[i][8];
- }
- card->info.is_multicast_different = known_devices[i][9];
+ }
return 0;
}
i++;
@@ -1149,6 +1152,8 @@ qeth_probe_device(struct ccwgroup_device *gdev)
if (!get_device(dev))
return -ENODEV;
+ QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id);
+
card = qeth_alloc_card();
if (!card) {
put_device(dev);
@@ -1158,28 +1163,27 @@ qeth_probe_device(struct ccwgroup_device *gdev)
card->read.ccwdev = gdev->cdev[0];
card->write.ccwdev = gdev->cdev[1];
card->data.ccwdev = gdev->cdev[2];
-
- if ((rc = qeth_setup_card(card))){
- QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
- put_device(dev);
- qeth_free_card(card);
- return rc;
- }
gdev->dev.driver_data = card;
card->gdev = gdev;
gdev->cdev[0]->handler = qeth_irq;
gdev->cdev[1]->handler = qeth_irq;
gdev->cdev[2]->handler = qeth_irq;
- rc = qeth_create_device_attributes(dev);
- if (rc) {
+ if ((rc = qeth_determine_card_type(card))){
+ PRINT_WARN("%s: not a valid card type\n", __func__);
+ QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+ put_device(dev);
+ qeth_free_card(card);
+ return rc;
+ }
+ if ((rc = qeth_setup_card(card))){
+ QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
put_device(dev);
qeth_free_card(card);
return rc;
}
- if ((rc = qeth_determine_card_type(card))){
- PRINT_WARN("%s: not a valid card type\n", __func__);
- QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
+ rc = qeth_create_device_attributes(dev);
+ if (rc) {
put_device(dev);
qeth_free_card(card);
return rc;
@@ -1660,6 +1664,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
netif_carrier_on(card->dev);
qeth_schedule_recovery(card);
return NULL;
+ case IPA_CMD_MODCCID:
+ return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
QETH_DBF_TEXT(trace,3, "irla");
break;
@@ -1721,6 +1727,14 @@ qeth_send_control_data_cb(struct qeth_channel *channel,
cmd = qeth_check_ipa_data(card, iob);
if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
goto out;
+ /* in case of OSN: check if cmd is set */
+ if (card->info.type == QETH_CARD_TYPE_OSN &&
+ cmd &&
+ cmd->hdr.command != IPA_CMD_STARTLAN &&
+ card->osn_info.assist_cb != NULL) {
+ card->osn_info.assist_cb(card->dev, cmd);
+ goto out;
+ }
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
@@ -1737,8 +1751,7 @@ qeth_send_control_data_cb(struct qeth_channel *channel,
keep_reply = reply->callback(card,
reply,
(unsigned long)cmd);
- }
- else
+ } else
keep_reply = reply->callback(card,
reply,
(unsigned long)iob);
@@ -1768,6 +1781,24 @@ out:
qeth_release_buffer(channel,iob);
}
+static inline void
+qeth_prepare_control_data(struct qeth_card *card, int len,
+ struct qeth_cmd_buffer *iob)
+{
+ qeth_setup_ccw(&card->write,iob->data,len);
+ iob->callback = qeth_release_buffer;
+
+ memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
+ &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+ card->seqno.trans_hdr++;
+ memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
+ &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
+ card->seqno.pdu_hdr++;
+ memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
+ &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
+ QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
+}
+
static int
qeth_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob,
@@ -1778,24 +1809,11 @@ qeth_send_control_data(struct qeth_card *card, int len,
{
int rc;
unsigned long flags;
- struct qeth_reply *reply;
+ struct qeth_reply *reply = NULL;
struct timer_list timer;
QETH_DBF_TEXT(trace, 2, "sendctl");
- qeth_setup_ccw(&card->write,iob->data,len);
-
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
- card->seqno.trans_hdr++;
-
- memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
- &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
- card->seqno.pdu_hdr++;
- memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
- &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
- iob->callback = qeth_release_buffer;
-
reply = qeth_alloc_reply(card);
if (!reply) {
PRINT_WARN("Could no alloc qeth_reply!\n");
@@ -1810,10 +1828,6 @@ qeth_send_control_data(struct qeth_card *card, int len,
init_timer(&timer);
timer.function = qeth_cmd_timeout;
timer.data = (unsigned long) reply;
- if (IS_IPA(iob->data))
- timer.expires = jiffies + QETH_IPA_TIMEOUT;
- else
- timer.expires = jiffies + QETH_TIMEOUT;
init_waitqueue_head(&reply->wait_q);
spin_lock_irqsave(&card->lock, flags);
list_add_tail(&reply->list, &card->cmd_waiter_list);
@@ -1821,6 +1835,11 @@ qeth_send_control_data(struct qeth_card *card, int len,
QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
wait_event(card->wait_q,
atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+ qeth_prepare_control_data(card, len, iob);
+ if (IS_IPA(iob->data))
+ timer.expires = jiffies + QETH_IPA_TIMEOUT;
+ else
+ timer.expires = jiffies + QETH_TIMEOUT;
QETH_DBF_TEXT(trace, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
@@ -1848,6 +1867,62 @@ qeth_send_control_data(struct qeth_card *card, int len,
}
static int
+qeth_osn_send_control_data(struct qeth_card *card, int len,
+ struct qeth_cmd_buffer *iob)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ QETH_DBF_TEXT(trace, 5, "osndctrd");
+
+ wait_event(card->wait_q,
+ atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+ qeth_prepare_control_data(card, len, iob);
+ QETH_DBF_TEXT(trace, 6, "osnoirqp");
+ spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
+ rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
+ (addr_t) iob, 0, 0);
+ spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+ if (rc){
+ PRINT_WARN("qeth_osn_send_control_data: "
+ "ccw_device_start rc = %i\n", rc);
+ QETH_DBF_TEXT_(trace, 2, " err%d", rc);
+ qeth_release_buffer(iob->channel, iob);
+ atomic_set(&card->write.irq_pending, 0);
+ wake_up(&card->wait_q);
+ }
+ return rc;
+}
+
+static inline void
+qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ char prot_type)
+{
+ memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+ memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
+ memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+ &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+}
+
+static int
+qeth_osn_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ int data_len)
+{
+ u16 s1, s2;
+
+ QETH_DBF_TEXT(trace,4,"osndipa");
+
+ qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
+ s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
+ s2 = (u16)data_len;
+ memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
+ memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
+ memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
+ memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
+ return qeth_osn_send_control_data(card, s1, iob);
+}
+
+static int
qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int (*reply_cb)
(struct qeth_card *,struct qeth_reply*, unsigned long),
@@ -1858,17 +1933,14 @@ qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
QETH_DBF_TEXT(trace,4,"sendipa");
- memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
-
if (card->options.layer2)
- prot_type = QETH_PROT_LAYER2;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ prot_type = QETH_PROT_OSN2;
+ else
+ prot_type = QETH_PROT_LAYER2;
else
prot_type = QETH_PROT_TCPIP;
-
- memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1);
- memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
- &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
-
+ qeth_prepare_ipa_cmd(card, iob, prot_type);
rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob,
reply_cb, reply_param);
return rc;
@@ -2010,7 +2082,10 @@ qeth_ulp_enable(struct qeth_card *card)
*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
(__u8) card->info.portno;
if (card->options.layer2)
- prot_type = QETH_PROT_LAYER2;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ prot_type = QETH_PROT_OSN2;
+ else
+ prot_type = QETH_PROT_LAYER2;
else
prot_type = QETH_PROT_TCPIP;
@@ -2100,15 +2175,21 @@ qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf,
}
static inline struct sk_buff *
-qeth_get_skb(unsigned int length)
+qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
{
struct sk_buff* skb;
+ int add_len;
+
+ add_len = 0;
+ if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN)
+ add_len = sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
- if ((skb = dev_alloc_skb(length + VLAN_HLEN)))
- skb_reserve(skb, VLAN_HLEN);
-#else
- skb = dev_alloc_skb(length);
+ else
+ add_len = VLAN_HLEN;
#endif
+ skb = dev_alloc_skb(length + add_len);
+ if (skb && add_len)
+ skb_reserve(skb, add_len);
return skb;
}
@@ -2138,7 +2219,10 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
offset += sizeof(struct qeth_hdr);
if (card->options.layer2)
- skb_len = (*hdr)->hdr.l2.pkt_length;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ skb_len = (*hdr)->hdr.osn.pdu_length;
+ else
+ skb_len = (*hdr)->hdr.l2.pkt_length;
else
skb_len = (*hdr)->hdr.l3.length;
@@ -2146,15 +2230,15 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
return NULL;
if (card->options.fake_ll){
if(card->dev->type == ARPHRD_IEEE802_TR){
- if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR)))
+ if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr)))
goto no_mem;
skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
} else {
- if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH)))
+ if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr)))
goto no_mem;
skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
}
- } else if (!(skb = qeth_get_skb(skb_len)))
+ } else if (!(skb = qeth_get_skb(skb_len, *hdr)))
goto no_mem;
data_ptr = element->addr + offset;
while (skb_len) {
@@ -2453,8 +2537,12 @@ qeth_process_inbound_buffer(struct qeth_card *card,
skb->dev = card->dev;
if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
- else
+ else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
qeth_rebuild_skb(card, skb, hdr);
+ else { /* in case of OSN */
+ skb_push(skb, sizeof(struct qeth_hdr));
+ memcpy(skb->data, hdr, sizeof(struct qeth_hdr));
+ }
/* is device UP ? */
if (!(card->dev->flags & IFF_UP)){
dev_kfree_skb_any(skb);
@@ -2465,7 +2553,10 @@ qeth_process_inbound_buffer(struct qeth_card *card,
vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
else
#endif
- rxrc = netif_rx(skb);
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ rxrc = card->osn_info.data_cb(skb);
+ else
+ rxrc = netif_rx(skb);
card->dev->last_rx = jiffies;
card->stats.rx_packets++;
card->stats.rx_bytes += skb->len;
@@ -3150,8 +3241,6 @@ qeth_init_qdio_info(struct qeth_card *card)
INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
/* outbound */
- card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
- card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
}
static int
@@ -3466,7 +3555,7 @@ qeth_mpc_initialize(struct qeth_card *card)
return 0;
out_qdio:
- qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE);
+ qeth_qdio_clear_card(card, card->info.type!=QETH_CARD_TYPE_IQD);
return rc;
}
@@ -3491,6 +3580,9 @@ qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype)
case QETH_CARD_TYPE_IQD:
dev = alloc_netdev(0, "hsi%d", ether_setup);
break;
+ case QETH_CARD_TYPE_OSN:
+ dev = alloc_netdev(0, "osn%d", ether_setup);
+ break;
default:
dev = alloc_etherdev(0);
}
@@ -3655,7 +3747,8 @@ qeth_open(struct net_device *dev)
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
- if ( (card->options.layer2) &&
+ if ( (card->info.type != QETH_CARD_TYPE_OSN) &&
+ (card->options.layer2) &&
(!card->info.layer2_mac_registered)) {
QETH_DBF_TEXT(trace,4,"nomacadr");
return -EPERM;
@@ -3693,6 +3786,9 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
int cast_type = RTN_UNSPEC;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ return cast_type;
+
if (skb->dst && skb->dst->neighbour){
cast_type = skb->dst->neighbour->type;
if ((cast_type == RTN_BROADCAST) ||
@@ -3782,13 +3878,16 @@ static inline int
qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
struct qeth_hdr **hdr, int ipv)
{
- int rc;
+ int rc = 0;
#ifdef CONFIG_QETH_VLAN
u16 *tag;
#endif
QETH_DBF_TEXT(trace, 6, "prepskb");
-
+ if (card->info.type == QETH_CARD_TYPE_OSN) {
+ *hdr = (struct qeth_hdr *)(*skb)->data;
+ return rc;
+ }
rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
if (rc)
return rc;
@@ -4291,8 +4390,14 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
}
}
}
+ if ((card->info.type == QETH_CARD_TYPE_OSN) &&
+ (skb->protocol == htons(ETH_P_IPV6))) {
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
cast_type = qeth_get_cast_type(card, skb);
- if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)){
+ if ((cast_type == RTN_BROADCAST) &&
+ (card->info.broadcast_capable == 0)){
card->stats.tx_dropped++;
card->stats.tx_errors++;
dev_kfree_skb_any(skb);
@@ -4320,7 +4425,8 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
return rc;
}
- qeth_fill_header(card, hdr, skb, ipv, cast_type);
+ if (card->info.type != QETH_CARD_TYPE_OSN)
+ qeth_fill_header(card, hdr, skb, ipv, cast_type);
}
if (large_send == QETH_LARGE_SEND_EDDP) {
@@ -4381,6 +4487,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&&
+ (card->info.link_type != QETH_LINK_TYPE_OSN) &&
(card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
rc |= BMCR_SPEED100;
break;
@@ -5004,6 +5111,9 @@ qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
(card->state != CARD_STATE_SOFTSETUP))
return -ENODEV;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ return -EPERM;
+
switch (cmd){
case SIOC_QETH_ARP_SET_NO_ENTRIES:
if ( !capable(CAP_NET_ADMIN) ||
@@ -5329,6 +5439,9 @@ qeth_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = (struct qeth_card *) dev->priv;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ return;
+
QETH_DBF_TEXT(trace,3,"setmulti");
qeth_delete_mc_addresses(card);
qeth_add_multicast_ipv4(card);
@@ -5370,6 +5483,94 @@ qeth_get_addr_buffer(enum qeth_prot_versions prot)
return addr;
}
+int
+qeth_osn_assist(struct net_device *dev,
+ void *data,
+ int data_len)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_card *card;
+ int rc;
+
+ QETH_DBF_TEXT(trace, 2, "osnsdmc");
+ if (!dev)
+ return -ENODEV;
+ card = (struct qeth_card *)dev->priv;
+ if (!card)
+ return -ENODEV;
+ if ((card->state != CARD_STATE_UP) &&
+ (card->state != CARD_STATE_SOFTSETUP))
+ return -ENODEV;
+ iob = qeth_wait_for_buffer(&card->write);
+ memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len);
+ rc = qeth_osn_send_ipa_cmd(card, iob, data_len);
+ return rc;
+}
+
+static struct net_device *
+qeth_netdev_by_devno(unsigned char *read_dev_no)
+{
+ struct qeth_card *card;
+ struct net_device *ndev;
+ unsigned char *readno;
+ __u16 temp_dev_no, card_dev_no;
+ char *endp;
+ unsigned long flags;
+
+ ndev = NULL;
+ memcpy(&temp_dev_no, read_dev_no, 2);
+ read_lock_irqsave(&qeth_card_list.rwlock, flags);
+ list_for_each_entry(card, &qeth_card_list.list, list) {
+ readno = CARD_RDEV_ID(card);
+ readno += (strlen(readno) - 4);
+ card_dev_no = simple_strtoul(readno, &endp, 16);
+ if (card_dev_no == temp_dev_no) {
+ ndev = card->dev;
+ break;
+ }
+ }
+ read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
+ return ndev;
+}
+
+int
+qeth_osn_register(unsigned char *read_dev_no,
+ struct net_device **dev,
+ int (*assist_cb)(struct net_device *, void *),
+ int (*data_cb)(struct sk_buff *))
+{
+ struct qeth_card * card;
+
+ QETH_DBF_TEXT(trace, 2, "osnreg");
+ *dev = qeth_netdev_by_devno(read_dev_no);
+ if (*dev == NULL)
+ return -ENODEV;
+ card = (struct qeth_card *)(*dev)->priv;
+ if (!card)
+ return -ENODEV;
+ if ((assist_cb == NULL) || (data_cb == NULL))
+ return -EINVAL;
+ card->osn_info.assist_cb = assist_cb;
+ card->osn_info.data_cb = data_cb;
+ return 0;
+}
+
+void
+qeth_osn_deregister(struct net_device * dev)
+{
+ struct qeth_card *card;
+
+ QETH_DBF_TEXT(trace, 2, "osndereg");
+ if (!dev)
+ return;
+ card = (struct qeth_card *)dev->priv;
+ if (!card)
+ return;
+ card->osn_info.assist_cb = NULL;
+ card->osn_info.data_cb = NULL;
+ return;
+}
+
static void
qeth_delete_mc_addresses(struct qeth_card *card)
{
@@ -5700,6 +5901,12 @@ qeth_layer2_set_mac_address(struct net_device *dev, void *p)
QETH_DBF_TEXT(trace, 3, "setmcLY3");
return -EOPNOTSUPP;
}
+ if (card->info.type == QETH_CARD_TYPE_OSN) {
+ PRINT_WARN("Setting MAC address on %s is not supported.\n",
+ dev->name);
+ QETH_DBF_TEXT(trace, 3, "setmcOSN");
+ return -EOPNOTSUPP;
+ }
QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
@@ -6076,9 +6283,8 @@ qeth_netdev_init(struct net_device *dev)
qeth_get_hlen(card->info.link_type) + card->options.add_hhlen;
dev->addr_len = OSA_ADDR_LEN;
dev->mtu = card->info.initial_mtu;
-
- SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
-
+ if (card->info.type != QETH_CARD_TYPE_OSN)
+ SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops);
SET_MODULE_OWNER(dev);
return 0;
}
@@ -6095,6 +6301,7 @@ qeth_init_func_level(struct qeth_card *card)
QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
} else {
if (card->info.type == QETH_CARD_TYPE_IQD)
+ /* FIXME: why do we have the same values for dis and ena for osae? */
card->info.func_level =
QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
else
@@ -6124,7 +6331,7 @@ retry:
ccw_device_set_online(CARD_WDEV(card));
ccw_device_set_online(CARD_DDEV(card));
}
- rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE);
+ rc = qeth_qdio_clear_card(card,card->info.type!=QETH_CARD_TYPE_IQD);
if (rc == -ERESTARTSYS) {
QETH_DBF_TEXT(setup, 2, "break1");
return rc;
@@ -6176,8 +6383,8 @@ retry:
card->dev = qeth_get_netdevice(card->info.type,
card->info.link_type);
if (!card->dev){
- qeth_qdio_clear_card(card, card->info.type ==
- QETH_CARD_TYPE_OSAE);
+ qeth_qdio_clear_card(card, card->info.type !=
+ QETH_CARD_TYPE_IQD);
rc = -ENODEV;
QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
goto out;
@@ -7084,6 +7291,8 @@ qeth_softsetup_card(struct qeth_card *card)
return rc;
} else
card->lan_online = 1;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ goto out;
if (card->options.layer2) {
card->dev->features |=
NETIF_F_HW_VLAN_FILTER |
@@ -7255,7 +7464,8 @@ qeth_stop_card(struct qeth_card *card, int recovery_mode)
if (card->read.state == CH_STATE_UP &&
card->write.state == CH_STATE_UP &&
(card->state == CARD_STATE_UP)) {
- if(recovery_mode) {
+ if (recovery_mode &&
+ card->info.type != QETH_CARD_TYPE_OSN) {
qeth_stop(card->dev);
} else {
rtnl_lock();
@@ -7437,7 +7647,8 @@ qeth_start_again(struct qeth_card *card, int recovery_mode)
{
QETH_DBF_TEXT(setup ,2, "startag");
- if(recovery_mode) {
+ if (recovery_mode &&
+ card->info.type != QETH_CARD_TYPE_OSN) {
qeth_open(card->dev);
} else {
rtnl_lock();
@@ -7469,33 +7680,36 @@ qeth_start_again(struct qeth_card *card, int recovery_mode)
static void qeth_make_parameters_consistent(struct qeth_card *card)
{
- if (card->options.layer2) {
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- PRINT_ERR("Device %s does not support " \
- "layer 2 functionality. " \
- "Ignoring layer2 option.\n",CARD_BUS_ID(card));
- }
- IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
- "Routing options are");
+ if (card->options.layer2 == 0)
+ return;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ return;
+ if (card->info.type == QETH_CARD_TYPE_IQD) {
+ PRINT_ERR("Device %s does not support layer 2 functionality." \
+ " Ignoring layer2 option.\n",CARD_BUS_ID(card));
+ card->options.layer2 = 0;
+ return;
+ }
+ IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER,
+ "Routing options are");
#ifdef CONFIG_QETH_IPV6
- IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
- "Routing options are");
+ IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER,
+ "Routing options are");
#endif
- IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
- QETH_CHECKSUM_DEFAULT,
- "Checksumming options are");
- IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
- QETH_TR_BROADCAST_ALLRINGS,
- "Broadcast mode options are");
- IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
- QETH_TR_MACADDR_NONCANONICAL,
- "Canonical MAC addr options are");
- IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
- "Broadcast faking options are");
- IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
- DEFAULT_ADD_HHLEN,"Option add_hhlen is");
- IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
- }
+ IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING,
+ QETH_CHECKSUM_DEFAULT,
+ "Checksumming options are");
+ IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS,
+ QETH_TR_BROADCAST_ALLRINGS,
+ "Broadcast mode options are");
+ IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL,
+ QETH_TR_MACADDR_NONCANONICAL,
+ "Canonical MAC addr options are");
+ IGNORE_PARAM_NEQ(fake_broadcast, 0, 0,
+ "Broadcast faking options are");
+ IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN,
+ DEFAULT_ADD_HHLEN,"Option add_hhlen is");
+ IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is");
}
@@ -7525,8 +7739,7 @@ __qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
return -EIO;
}
- if (card->options.layer2)
- qeth_make_parameters_consistent(card);
+ qeth_make_parameters_consistent(card);
if ((rc = qeth_hardsetup_card(card))){
QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
@@ -7585,6 +7798,7 @@ qeth_set_online(struct ccwgroup_device *gdev)
static struct ccw_device_id qeth_ids[] = {
{CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
{CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
+ {CCW_DEVICE(0x1731, 0x06), driver_info:QETH_CARD_TYPE_OSN},
{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
@@ -8329,6 +8543,9 @@ again:
printk("qeth: removed\n");
}
+EXPORT_SYMBOL(qeth_osn_register);
+EXPORT_SYMBOL(qeth_osn_deregister);
+EXPORT_SYMBOL(qeth_osn_assist);
module_init(qeth_init);
module_exit(qeth_exit);
MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
index f685ecc7da9..30e053d3cac 100644
--- a/drivers/s390/net/qeth_mpc.c
+++ b/drivers/s390/net/qeth_mpc.c
@@ -11,7 +11,7 @@
#include <asm/cio.h>
#include "qeth_mpc.h"
-const char *VERSION_QETH_MPC_C = "$Revision: 1.11 $";
+const char *VERSION_QETH_MPC_C = "$Revision: 1.12 $";
unsigned char IDX_ACTIVATE_READ[]={
0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
@@ -138,7 +138,9 @@ unsigned char IPA_PDU_HEADER[]={
sizeof(struct qeth_ipa_cmd)%256,
0x00,
sizeof(struct qeth_ipa_cmd)/256,
- sizeof(struct qeth_ipa_cmd),0x05, 0x77,0x77,0x77,0x77,
+ sizeof(struct qeth_ipa_cmd)%256,
+ 0x05,
+ 0x77,0x77,0x77,0x77,
0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
0x01,0x00,
sizeof(struct qeth_ipa_cmd)/256,
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h
index 3d916b5c5d0..7edc5f1fc0d 100644
--- a/drivers/s390/net/qeth_mpc.h
+++ b/drivers/s390/net/qeth_mpc.h
@@ -46,13 +46,16 @@ extern unsigned char IPA_PDU_HEADER[];
/* IP Assist related definitions */
/*****************************************************************************/
#define IPA_CMD_INITIATOR_HOST 0x00
-#define IPA_CMD_INITIATOR_HYDRA 0x01
+#define IPA_CMD_INITIATOR_OSA 0x01
+#define IPA_CMD_INITIATOR_HOST_REPLY 0x80
+#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
enum qeth_card_types {
QETH_CARD_TYPE_UNKNOWN = 0,
QETH_CARD_TYPE_OSAE = 10,
QETH_CARD_TYPE_IQD = 1234,
+ QETH_CARD_TYPE_OSN = 11,
};
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -61,6 +64,7 @@ enum qeth_link_types {
QETH_LINK_TYPE_FAST_ETH = 0x01,
QETH_LINK_TYPE_HSTR = 0x02,
QETH_LINK_TYPE_GBIT_ETH = 0x03,
+ QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
QETH_LINK_TYPE_LANE_TR = 0x82,
@@ -111,6 +115,9 @@ enum qeth_ipa_cmds {
IPA_CMD_DELGMAC = 0x24,
IPA_CMD_SETVLAN = 0x25,
IPA_CMD_DELVLAN = 0x26,
+ IPA_CMD_SETCCID = 0x41,
+ IPA_CMD_DELCCID = 0x42,
+ IPA_CMD_MODCCID = 0x43,
IPA_CMD_SETIP = 0xb1,
IPA_CMD_DELIP = 0xb7,
IPA_CMD_QIPASSIST = 0xb2,
@@ -437,8 +444,9 @@ enum qeth_ipa_arp_return_codes {
#define QETH_ARP_DATA_SIZE 3968
#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
/* Helper functions */
-#define IS_IPA_REPLY(cmd) (cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST)
-
+#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
+ (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
+
/*****************************************************************************/
/* END OF IP Assist related definitions */
/*****************************************************************************/
@@ -483,6 +491,7 @@ extern unsigned char ULP_ENABLE[];
/* Layer 2 definitions */
#define QETH_PROT_LAYER2 0x08
#define QETH_PROT_TCPIP 0x03
+#define QETH_PROT_OSN2 0x0a
#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50)
#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19)
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index dda105b7306..f91a02db574 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -1,6 +1,6 @@
/*
*
- * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.54 $)
+ * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.55 $)
*
* Linux on zSeries OSA Express and HiperSockets support
* This file contains code related to sysfs.
@@ -20,7 +20,7 @@
#include "qeth_mpc.h"
#include "qeth_fs.h"
-const char *VERSION_QETH_SYS_C = "$Revision: 1.54 $";
+const char *VERSION_QETH_SYS_C = "$Revision: 1.55 $";
/*****************************************************************************/
/* */
@@ -937,6 +937,19 @@ static struct attribute_group qeth_device_attr_group = {
.attrs = (struct attribute **)qeth_device_attrs,
};
+static struct device_attribute * qeth_osn_device_attrs[] = {
+ &dev_attr_state,
+ &dev_attr_chpid,
+ &dev_attr_if_name,
+ &dev_attr_card_type,
+ &dev_attr_buffer_count,
+ &dev_attr_recover,
+ NULL,
+};
+
+static struct attribute_group qeth_osn_device_attr_group = {
+ .attrs = (struct attribute **)qeth_osn_device_attrs,
+};
#define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \
struct device_attribute dev_attr_##_id = { \
@@ -1667,7 +1680,12 @@ int
qeth_create_device_attributes(struct device *dev)
{
int ret;
+ struct qeth_card *card = dev->driver_data;
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ return sysfs_create_group(&dev->kobj,
+ &qeth_osn_device_attr_group);
+
if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group)))
return ret;
if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){
@@ -1693,6 +1711,12 @@ qeth_create_device_attributes(struct device *dev)
void
qeth_remove_device_attributes(struct device *dev)
{
+ struct qeth_card *card = dev->driver_data;
+
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ return sysfs_remove_group(&dev->kobj,
+ &qeth_osn_device_attr_group);
+
sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d40ba0bd68a..23392ae7df8 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -91,7 +91,7 @@
#ifndef NDEBUG
#define NDEBUG 0
#endif
-#ifndef NDEBUG
+#ifndef NDEBUG_ABORT
#define NDEBUG_ABORT 0
#endif
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 4a99d2f000f..d54b1cc88d0 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -19,7 +19,7 @@
#define AAC_MAX_LUN (8)
#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
-#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)512)
+#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
/*
* These macros convert from physical channels to virtual channels
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index cfa22e4ee54..fe8187d6f58 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -216,7 +216,7 @@ static Scsi_Host_Template ahci_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations ahci_ops = {
+static const struct ata_port_operations ahci_ops = {
.port_disable = ata_port_disable,
.check_status = ahci_check_status,
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index d71cef767ce..be021478f41 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -147,7 +147,7 @@ static Scsi_Host_Template piix_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations piix_pata_ops = {
+static const struct ata_port_operations piix_pata_ops = {
.port_disable = ata_port_disable,
.set_piomode = piix_set_piomode,
.set_dmamode = piix_set_dmamode,
@@ -177,7 +177,7 @@ static struct ata_port_operations piix_pata_ops = {
.host_stop = ata_host_stop,
};
-static struct ata_port_operations piix_sata_ops = {
+static const struct ata_port_operations piix_sata_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index c10e45b94b6..3d13fdee4fc 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1357,7 +1357,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
for (i = 0; i < shost->can_queue; i++) {
size_t sz = shost->sg_tablesize *sizeof(struct sg_list);
- unsigned int gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC;
+ gfp_t gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC;
ha->cp[i].sglist = kmalloc(sz, gfp_mask);
if (!ha->cp[i].sglist) {
printk
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 02fe371b0ab..f24d84538fd 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -287,7 +287,8 @@ static void scsi_host_dev_release(struct device *dev)
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
- int gfp_mask = GFP_KERNEL, rval;
+ gfp_t gfp_mask = GFP_KERNEL;
+ int rval;
if (sht->unchecked_isa_dma && privsize)
gfp_mask |= __GFP_DMA;
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 9269fd9b814..f53d7b8ac33 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -67,9 +67,9 @@ static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
-static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
static int fgb(u32 bitmap);
-static int ata_choose_xfer_mode(struct ata_port *ap,
+static int ata_choose_xfer_mode(const struct ata_port *ap,
u8 *xfer_mode_out,
unsigned int *xfer_shift_out);
static void __ata_qc_complete(struct ata_queued_cmd *qc);
@@ -87,7 +87,7 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
- * ata_tf_load - send taskfile registers to host controller
+ * ata_tf_load_pio - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
*
@@ -97,7 +97,7 @@ MODULE_VERSION(DRV_VERSION);
* Inherited from caller.
*/
-static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -155,7 +155,7 @@ static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
* Inherited from caller.
*/
-static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -224,7 +224,7 @@ static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
* LOCKING:
* Inherited from caller.
*/
-void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
if (ap->flags & ATA_FLAG_MMIO)
ata_tf_load_mmio(ap, tf);
@@ -244,7 +244,7 @@ void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
* spin_lock_irqsave(host_set lock)
*/
-static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
@@ -265,7 +265,7 @@ static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
* spin_lock_irqsave(host_set lock)
*/
-static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
@@ -285,7 +285,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
-void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
if (ap->flags & ATA_FLAG_MMIO)
ata_exec_command_mmio(ap, tf);
@@ -305,7 +305,7 @@ void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
* Obtains host_set lock.
*/
-static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
+static inline void ata_exec(struct ata_port *ap, const struct ata_taskfile *tf)
{
unsigned long flags;
@@ -328,7 +328,7 @@ static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
* Obtains host_set lock.
*/
-static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
+static void ata_tf_to_host(struct ata_port *ap, const struct ata_taskfile *tf)
{
ap->ops->tf_load(ap, tf);
@@ -348,7 +348,7 @@ static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
* spin_lock_irqsave(host_set lock)
*/
-void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf)
{
ap->ops->tf_load(ap, tf);
ap->ops->exec_command(ap, tf);
@@ -558,7 +558,7 @@ u8 ata_chk_err(struct ata_port *ap)
* Inherited from caller.
*/
-void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
fis[0] = 0x27; /* Register - Host to Device FIS */
fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
@@ -599,7 +599,7 @@ void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
* Inherited from caller.
*/
-void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
tf->command = fis[2]; /* status */
tf->feature = fis[3]; /* error */
@@ -845,7 +845,7 @@ static unsigned int ata_devchk(struct ata_port *ap,
* the event of failure.
*/
-unsigned int ata_dev_classify(struct ata_taskfile *tf)
+unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
/* Apple's open source Darwin code hints that some devices only
* put a proper signature into the LBA mid/high registers,
@@ -937,7 +937,7 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
* caller.
*/
-void ata_dev_id_string(u16 *id, unsigned char *s,
+void ata_dev_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len)
{
unsigned int c;
@@ -1054,7 +1054,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
* caller.
*/
-static inline void ata_dump_id(struct ata_device *dev)
+static inline void ata_dump_id(const struct ata_device *dev)
{
DPRINTK("49==0x%04x "
"53==0x%04x "
@@ -1089,7 +1089,7 @@ static inline void ata_dump_id(struct ata_device *dev)
* FIXME: pre IDE drive timing (do we care ?).
*/
-static unsigned int ata_pio_modes(struct ata_device *adev)
+static unsigned int ata_pio_modes(const struct ata_device *adev)
{
u16 modes;
@@ -1352,7 +1352,7 @@ err_out:
}
-static inline u8 ata_dev_knobble(struct ata_port *ap)
+static inline u8 ata_dev_knobble(const struct ata_port *ap)
{
return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
}
@@ -1616,7 +1616,7 @@ static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
const struct ata_timing *t;
for (t = ata_timing; t->mode != speed; t++)
- if (t->mode != 0xFF)
+ if (t->mode == 0xFF)
return NULL;
return t;
}
@@ -1684,7 +1684,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
return 0;
}
-static struct {
+static const struct {
unsigned int shift;
u8 base;
} xfer_mode_classes[] = {
@@ -2093,7 +2093,8 @@ err_out:
DPRINTK("EXIT\n");
}
-static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
+static void ata_pr_blacklisted(const struct ata_port *ap,
+ const struct ata_device *dev)
{
printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
ap->id, dev->devno);
@@ -2131,7 +2132,7 @@ static const char * ata_dma_blacklist [] = {
"_NEC DV5800A",
};
-static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
+static int ata_dma_blacklisted(const struct ata_device *dev)
{
unsigned char model_num[40];
char *s;
@@ -2156,9 +2157,9 @@ static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
return 0;
}
-static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
{
- struct ata_device *master, *slave;
+ const struct ata_device *master, *slave;
unsigned int mask;
master = &ap->device[0];
@@ -2170,14 +2171,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
mask = ap->udma_mask;
if (ata_dev_present(master)) {
mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
- if (ata_dma_blacklisted(ap, master)) {
+ if (ata_dma_blacklisted(master)) {
mask = 0;
ata_pr_blacklisted(ap, master);
}
}
if (ata_dev_present(slave)) {
mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
- if (ata_dma_blacklisted(ap, slave)) {
+ if (ata_dma_blacklisted(slave)) {
mask = 0;
ata_pr_blacklisted(ap, slave);
}
@@ -2187,14 +2188,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
mask = ap->mwdma_mask;
if (ata_dev_present(master)) {
mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
- if (ata_dma_blacklisted(ap, master)) {
+ if (ata_dma_blacklisted(master)) {
mask = 0;
ata_pr_blacklisted(ap, master);
}
}
if (ata_dev_present(slave)) {
mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
- if (ata_dma_blacklisted(ap, slave)) {
+ if (ata_dma_blacklisted(slave)) {
mask = 0;
ata_pr_blacklisted(ap, slave);
}
@@ -2258,7 +2259,7 @@ static int fgb(u32 bitmap)
* Zero on success, negative on error.
*/
-static int ata_choose_xfer_mode(struct ata_port *ap,
+static int ata_choose_xfer_mode(const struct ata_port *ap,
u8 *xfer_mode_out,
unsigned int *xfer_shift_out)
{
@@ -2700,13 +2701,13 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
/**
* ata_pio_poll -
- * @ap:
+ * @ap: the target ata_port
*
* LOCKING:
* None. (executing in kernel thread context)
*
* RETURNS:
- *
+ * timeout value to use
*/
static unsigned long ata_pio_poll(struct ata_port *ap)
@@ -2747,8 +2748,8 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
}
/**
- * ata_pio_complete -
- * @ap:
+ * ata_pio_complete - check if drive is busy or idle
+ * @ap: the target ata_port
*
* LOCKING:
* None. (executing in kernel thread context)
@@ -2800,7 +2801,7 @@ static int ata_pio_complete (struct ata_port *ap)
/**
- * swap_buf_le16 -
+ * swap_buf_le16 - swap halves of 16-bit words in place
* @buf: Buffer to swap
* @buf_words: Number of 16-bit words in buffer.
*
@@ -2809,6 +2810,7 @@ static int ata_pio_complete (struct ata_port *ap)
* vice-versa.
*
* LOCKING:
+ * Inherited from caller.
*/
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
@@ -2831,7 +2833,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
*
* LOCKING:
* Inherited from caller.
- *
*/
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2877,7 +2878,6 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
*
* LOCKING:
* Inherited from caller.
- *
*/
static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -2917,7 +2917,6 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
*
* LOCKING:
* Inherited from caller.
- *
*/
static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
@@ -3070,7 +3069,6 @@ next_sg:
*
* LOCKING:
* Inherited from caller.
- *
*/
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
@@ -3106,8 +3104,8 @@ err_out:
}
/**
- * ata_pio_sector -
- * @ap:
+ * ata_pio_block - start PIO on a block
+ * @ap: the target ata_port
*
* LOCKING:
* None. (executing in kernel thread context)
@@ -3119,7 +3117,7 @@ static void ata_pio_block(struct ata_port *ap)
u8 status;
/*
- * This is purely hueristic. This is a fast path.
+ * This is purely heuristic. This is a fast path.
* Sometimes when we enter, BSY will be cleared in
* a chk-status or two. If not, the drive is probably seeking
* or something. Snooze for a couple msecs, then
@@ -3439,7 +3437,6 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc)
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
- *
*/
void ata_qc_free(struct ata_queued_cmd *qc)
{
@@ -3459,7 +3456,6 @@ void ata_qc_free(struct ata_queued_cmd *qc)
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
- *
*/
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
@@ -3953,7 +3949,6 @@ idle_irq:
*
* RETURNS:
* IRQ_NONE or IRQ_HANDLED.
- *
*/
irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
@@ -4065,6 +4060,7 @@ err_out:
* May be used as the port_start() entry in ata_port_operations.
*
* LOCKING:
+ * Inherited from caller.
*/
int ata_port_start (struct ata_port *ap)
@@ -4090,6 +4086,7 @@ int ata_port_start (struct ata_port *ap)
* May be used as the port_stop() entry in ata_port_operations.
*
* LOCKING:
+ * Inherited from caller.
*/
void ata_port_stop (struct ata_port *ap)
@@ -4112,6 +4109,7 @@ void ata_host_stop (struct ata_host_set *host_set)
* @do_unregister: 1 if we fully unregister, 0 to just stop the port
*
* LOCKING:
+ * Inherited from caller.
*/
static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
@@ -4139,12 +4137,11 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
*
* LOCKING:
* Inherited from caller.
- *
*/
static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
struct ata_host_set *host_set,
- struct ata_probe_ent *ent, unsigned int port_no)
+ const struct ata_probe_ent *ent, unsigned int port_no)
{
unsigned int i;
@@ -4200,10 +4197,9 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
*
* RETURNS:
* New ata_port on success, or NULL on error.
- *
*/
-static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
+static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
struct ata_host_set *host_set,
unsigned int port_no)
{
@@ -4248,10 +4244,9 @@ err_out:
*
* RETURNS:
* Number of ports registered. Zero on error (no ports registered).
- *
*/
-int ata_device_add(struct ata_probe_ent *ent)
+int ata_device_add(const struct ata_probe_ent *ent)
{
unsigned int count = 0, i;
struct device *dev = ent->dev;
@@ -4380,7 +4375,6 @@ err_out:
* Inherited from calling layer (may sleep).
*/
-
void ata_host_set_remove(struct ata_host_set *host_set)
{
struct ata_port *ap;
@@ -4470,7 +4464,7 @@ void ata_std_ports(struct ata_ioports *ioaddr)
}
static struct ata_probe_ent *
-ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
+ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
struct ata_probe_ent *probe_ent;
@@ -4570,7 +4564,6 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, stru
if (!probe_ent)
return NULL;
-
probe_ent->legacy_mode = 1;
probe_ent->n_ports = 1;
probe_ent->hard_port_no = port_num;
@@ -4614,7 +4607,6 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, stru
*
* RETURNS:
* Zero on success, negative errno-based value on error.
- *
*/
int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
@@ -4762,7 +4754,7 @@ err_out:
* @pdev: PCI device that was removed
*
* PCI layer indicates to libata via this hook that
- * hot-unplug or module unload event has occured.
+ * hot-unplug or module unload event has occurred.
* Handle this by unregistering all objects associated
* with this PCI device. Free those objects. Then finally
* release PCI resources and disable device.
@@ -4783,7 +4775,7 @@ void ata_pci_remove_one (struct pci_dev *pdev)
}
/* move to PCI subsystem */
-int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
+int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
unsigned long tmp = 0;
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 1ad75d58c30..b761bd1b971 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -47,9 +47,9 @@
#define SECTOR_SIZE 512
-typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, u8 *scsicmd);
+typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
static struct ata_device *
-ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev);
+ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
@@ -710,7 +710,7 @@ int ata_scsi_error(struct Scsi_Host *host)
*/
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
- u8 *scsicmd)
+ const u8 *scsicmd)
{
struct ata_taskfile *tf = &qc->tf;
@@ -777,7 +777,7 @@ invalid_fld:
* Zero on success, non-zero on error.
*/
-static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
+static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
{
struct ata_taskfile *tf = &qc->tf;
@@ -804,7 +804,7 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
* @plen: the transfer length
*/
-static void scsi_6_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
+static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
{
u64 lba = 0;
u32 len = 0;
@@ -831,7 +831,7 @@ static void scsi_6_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
* @plen: the transfer length
*/
-static void scsi_10_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
+static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
{
u64 lba = 0;
u32 len = 0;
@@ -861,7 +861,7 @@ static void scsi_10_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
* @plen: the transfer length
*/
-static void scsi_16_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
+static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
{
u64 lba = 0;
u32 len = 0;
@@ -900,7 +900,7 @@ static void scsi_16_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
* Zero on success, non-zero on error.
*/
-static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
+static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
@@ -1026,7 +1026,7 @@ nothing_to_do:
* Zero on success, non-zero on error.
*/
-static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
+static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
@@ -2010,7 +2010,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
* Zero on success, non-zero on failure.
*/
-static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
+static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
{
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
@@ -2079,7 +2079,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
*/
static struct ata_device *
-ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev)
+ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
{
struct ata_device *dev;
@@ -2378,7 +2378,7 @@ void ata_scsi_simulate(u16 *id,
void (*done)(struct scsi_cmnd *))
{
struct ata_scsi_args args;
- u8 *scsicmd = cmd->cmnd;
+ const u8 *scsicmd = cmd->cmnd;
args.id = id;
args.cmd = cmd;
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 39cce63dc45..65c264b9113 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -48,7 +48,7 @@ extern int ata_qc_issue(struct ata_queued_cmd *qc);
extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
extern void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep);
-extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf);
extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 0aba13ceaac..352df47bcac 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -39,7 +39,7 @@
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
static void *
-lpfc_pool_kmalloc(unsigned int gfp_flags, void *data)
+lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
{
return kmalloc((unsigned long)data, gfp_flags);
}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 3f2f2464fa6..af1133104b3 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5146,7 +5146,8 @@ static long osst_compat_ioctl(struct file * file, unsigned int cmd_in, unsigned
/* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */
static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg )
{
- int i, priority;
+ int i;
+ gfp_t priority;
struct osst_buffer *tb;
if (from_initialization)
@@ -5178,7 +5179,8 @@ static struct osst_buffer * new_tape_buffer( int from_initialization, int need_d
/* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */
static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
{
- int segs, nbr, max_segs, b_size, priority, order, got;
+ int segs, nbr, max_segs, b_size, order, got;
+ gfp_t priority;
if (STbuffer->buffer_size >= OS_FRAME_SIZE)
return 1;
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index 53b8db4be1a..9820f272f88 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -158,7 +158,7 @@ static Scsi_Host_Template adma_ata_sht = {
.bios_param = ata_std_bios_param,
};
-static struct ata_port_operations adma_ata_ops = {
+static const struct ata_port_operations adma_ata_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1ed32e7b547..e451941ad81 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -52,7 +52,7 @@ extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *);
extern int qla24xx_load_risc_hotplug(scsi_qla_host_t *, uint32_t *);
-extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, int);
+extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
extern int qla2x00_loop_resync(scsi_qla_host_t *);
@@ -277,7 +277,7 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_rscn.c source file.
*/
-extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, int);
+extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, gfp_t);
extern int qla2x00_handle_port_rscn(scsi_qla_host_t *, uint32_t, fc_port_t *,
int);
extern void qla2x00_process_iodesc(scsi_qla_host_t *, struct mbx_entry *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 23d095d3817..fbb6feee40c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1685,7 +1685,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
* Returns a pointer to the allocated fcport, or NULL, if none available.
*/
fc_port_t *
-qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags)
+qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
{
fc_port_t *fcport;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8982978c42f..7aec93f9d42 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1325,6 +1325,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->brd_info = brd_info;
sprintf(ha->host_str, "%s_%ld", ha->brd_info->drv_name, ha->host_no);
+ ha->dpc_pid = -1;
+
/* Configure PCI I/O space */
ret = qla2x00_iospace_config(ha);
if (ret)
@@ -1448,7 +1450,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
*/
spin_lock_init(&ha->mbx_reg_lock);
- ha->dpc_pid = -1;
init_completion(&ha->dpc_inited);
init_completion(&ha->dpc_exited);
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index 1eba9882863..7534efcc891 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -1066,7 +1066,7 @@ qla2x00_send_login_iocb_cb(scsi_qla_host_t *ha, struct io_descriptor *iodesc,
* Returns a pointer to the allocated RSCN fcport, or NULL, if none available.
*/
fc_port_t *
-qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, int flags)
+qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, gfp_t flags)
{
fc_port_t *fcport;
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 9b6213928f7..422e0b6f603 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -290,7 +290,7 @@ static Scsi_Host_Template mv_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations mv_ops = {
+static const struct ata_port_operations mv_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 8866530bc49..1a56d6c79dd 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -238,7 +238,7 @@ static Scsi_Host_Template nv_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations nv_ops = {
+static const struct ata_port_operations nv_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 9bf8cbd2990..eee93b0016d 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -87,8 +87,8 @@ static void pdc_port_stop(struct ata_port *ap);
static void pdc_pata_phy_reset(struct ata_port *ap);
static void pdc_sata_phy_reset(struct ata_port *ap);
static void pdc_qc_prep(struct ata_queued_cmd *qc);
-static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf);
-static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_irq_clear(struct ata_port *ap);
static int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
@@ -113,7 +113,7 @@ static Scsi_Host_Template pdc_ata_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations pdc_sata_ops = {
+static const struct ata_port_operations pdc_sata_ops = {
.port_disable = ata_port_disable,
.tf_load = pdc_tf_load_mmio,
.tf_read = ata_tf_read,
@@ -136,7 +136,7 @@ static struct ata_port_operations pdc_sata_ops = {
.host_stop = ata_pci_host_stop,
};
-static struct ata_port_operations pdc_pata_ops = {
+static const struct ata_port_operations pdc_pata_ops = {
.port_disable = ata_port_disable,
.tf_load = pdc_tf_load_mmio,
.tf_read = ata_tf_read,
@@ -546,7 +546,7 @@ static int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
return ata_qc_issue_prot(qc);
}
-static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
WARN_ON (tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATA_PROT_NODATA);
@@ -554,7 +554,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
}
-static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
WARN_ON (tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATA_PROT_NODATA);
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index e1c1dae27c5..250dafa6bc3 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -147,7 +147,7 @@ static Scsi_Host_Template qs_ata_sht = {
.bios_param = ata_std_bios_param,
};
-static struct ata_port_operations qs_ata_ops = {
+static const struct ata_port_operations qs_ata_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index f6f0184e1ac..3a056173fb9 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -150,7 +150,7 @@ static Scsi_Host_Template sil_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations sil_ops = {
+static const struct ata_port_operations sil_ops = {
.port_disable = ata_port_disable,
.dev_config = sil_dev_config,
.tf_load = ata_tf_load,
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 19857814d69..32d730bd5bb 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -275,7 +275,7 @@ static Scsi_Host_Template sil24_sht = {
.ordered_flush = 1, /* NCQ not supported yet */
};
-static struct ata_port_operations sil24_ops = {
+static const struct ata_port_operations sil24_ops = {
.port_disable = ata_port_disable,
.check_status = sil24_check_status,
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 0761a3234fc..057f7b98b6c 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -102,7 +102,7 @@ static Scsi_Host_Template sis_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations sis_ops = {
+static const struct ata_port_operations sis_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index d89d968beda..e0f9570bc6d 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -102,7 +102,7 @@ static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
}
-static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
+static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -297,7 +297,7 @@ static Scsi_Host_Template k2_sata_sht = {
};
-static struct ata_port_operations k2_sata_ops = {
+static const struct ata_port_operations k2_sata_ops = {
.port_disable = ata_port_disable,
.tf_load = k2_sata_tf_load,
.tf_read = k2_sata_tf_read,
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index d6d350a0b5e..af08f4f650c 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -157,8 +157,8 @@ static void pdc_20621_phy_reset (struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
static void pdc_port_stop(struct ata_port *ap);
static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
-static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf);
-static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc20621_host_stop(struct ata_host_set *host_set);
static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
@@ -196,7 +196,7 @@ static Scsi_Host_Template pdc_sata_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations pdc_20621_ops = {
+static const struct ata_port_operations pdc_20621_ops = {
.port_disable = ata_port_disable,
.tf_load = pdc_tf_load_mmio,
.tf_read = ata_tf_read,
@@ -899,7 +899,7 @@ out:
DPRINTK("EXIT\n");
}
-static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
WARN_ON (tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATA_PROT_NODATA);
@@ -907,7 +907,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
}
-static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
WARN_ON (tf->protocol == ATA_PROT_DMA ||
tf->protocol == ATA_PROT_NODATA);
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 9c06f2abe7f..d68dc7d3422 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -90,7 +90,7 @@ static Scsi_Host_Template uli_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations uli_ops = {
+static const struct ata_port_operations uli_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 565872479b9..80e291a909a 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -109,7 +109,7 @@ static Scsi_Host_Template svia_sht = {
.ordered_flush = 1,
};
-static struct ata_port_operations svia_sata_ops = {
+static const struct ata_port_operations svia_sata_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 877b9fda396..5af05fdf854 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -115,7 +115,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
}
-static void vsc_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
+static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
@@ -231,7 +231,7 @@ static Scsi_Host_Template vsc_sata_sht = {
};
-static struct ata_port_operations vsc_sata_ops = {
+static const struct ata_port_operations vsc_sata_ops = {
.port_disable = ata_port_disable,
.tf_load = vsc_sata_tf_load,
.tf_read = vsc_sata_tf_read,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1f0ebabf6d4..a5711d545d7 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(scsi_device_types);
* Returns: Pointer to request block.
*/
struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
- int gfp_mask)
+ gfp_t gfp_mask)
{
const int offset = ALIGN(sizeof(struct scsi_request), 4);
const int size = offset + sizeof(struct request);
@@ -196,7 +196,7 @@ struct scsi_host_cmd_pool {
unsigned int users;
char *name;
unsigned int slab_flags;
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
};
static struct scsi_host_cmd_pool scsi_cmd_pool = {
@@ -213,7 +213,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
static DECLARE_MUTEX(host_cmd_pool_mutex);
static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
- int gfp_mask)
+ gfp_t gfp_mask)
{
struct scsi_cmnd *cmd;
@@ -245,7 +245,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
*
* Returns: The allocated scsi command structure.
*/
-struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
+struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
struct scsi_cmnd *cmd;
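
The scsi.c hunks above, like most of the hunks through the USB drivers below, retype allocation-flag parameters from plain int/unsigned to gfp_t. Giving the GFP_* flags their own type lets sparse-style checking flag callers that pass an ordinary integer where allocation flags belong. A minimal sketch, assuming the hypothetical names my_buf and my_buf_alloc():

#include <linux/slab.h>

struct my_buf {
        void   *data;
        size_t  len;
};

static struct my_buf *my_buf_alloc(size_t len, gfp_t gfp_mask)
{
        /* gfp_mask is GFP_KERNEL when the caller may sleep,
         * GFP_ATOMIC from interrupt or spinlocked context */
        struct my_buf *b = kmalloc(sizeof(*b), gfp_mask);

        if (!b)
                return NULL;
        b->data = kmalloc(len, gfp_mask);
        if (!b->data) {
                kfree(b);
                return NULL;
        }
        b->len = len;
        return b;
}
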
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 64fc9e21f35..e69477d1889 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -185,6 +185,7 @@ static struct {
{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
{"SEAGATE", "ST34555N", "0930", BLIST_NOTQ}, /* Chokes on tagged INQUIRY */
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index de7f98cc38f..6a3f6aae8a9 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -205,7 +205,8 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
unsigned int inlen, outlen, cmdlen;
unsigned int needed, buf_needed;
int timeout, retries, result;
- int data_direction, gfp_mask = GFP_KERNEL;
+ int data_direction;
+ gfp_t gfp_mask = GFP_KERNEL;
if (!sic)
return -EINVAL;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dc9c772bc87..3ff53880978 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -97,7 +97,6 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
}
static void scsi_run_queue(struct request_queue *q);
-static void scsi_release_buffers(struct scsi_cmnd *cmd);
/*
* Function: scsi_unprep_request()
@@ -678,7 +677,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
return NULL;
}
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
+static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
struct scsi_host_sg_pool *sgp;
struct scatterlist *sgl;
@@ -1040,8 +1039,10 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
* if sg table allocation fails, requeue request later.
*/
sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
- if (unlikely(!sgpnt))
+ if (unlikely(!sgpnt)) {
+ scsi_unprep_request(req);
return BLKPREP_DEFER;
+ }
cmd->request_buffer = (char *) sgpnt;
cmd->request_bufflen = req->nr_sectors << 9;
@@ -1245,8 +1246,8 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
*/
ret = scsi_init_io(cmd);
switch(ret) {
+ /* For BLKPREP_KILL/DEFER the cmd was released */
case BLKPREP_KILL:
- /* BLKPREP_KILL return also releases the command */
goto kill;
case BLKPREP_DEFER:
goto defer;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2cab556b6e8..771e97ef136 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -819,12 +819,15 @@ show_fc_private_host_tgtid_bind_type(struct class_device *cdev, char *buf)
return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
}
+#define get_list_head_entry(pos, head, member) \
+ pos = list_entry((head)->next, typeof(*pos), member)
+
static ssize_t
store_fc_private_host_tgtid_bind_type(struct class_device *cdev,
const char *buf, size_t count)
{
struct Scsi_Host *shost = transport_class_to_shost(cdev);
- struct fc_rport *rport, *next_rport;
+ struct fc_rport *rport;
enum fc_tgtid_binding_type val;
unsigned long flags;
@@ -834,9 +837,13 @@ store_fc_private_host_tgtid_bind_type(struct class_device *cdev,
/* if changing bind type, purge all unused consistent bindings */
if (val != fc_host_tgtid_bind_type(shost)) {
spin_lock_irqsave(shost->host_lock, flags);
- list_for_each_entry_safe(rport, next_rport,
- &fc_host_rport_bindings(shost), peers)
+ while (!list_empty(&fc_host_rport_bindings(shost))) {
+ get_list_head_entry(rport,
+ &fc_host_rport_bindings(shost), peers);
+ spin_unlock_irqrestore(shost->host_lock, flags);
fc_rport_terminate(rport);
+ spin_lock_irqsave(shost->host_lock, flags);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
}
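
The store_fc_private_host_tgtid_bind_type() hunk above replaces a list_for_each_entry_safe() walk with a loop that always takes the current head of the list, because host_lock has to be dropped around fc_rport_terminate() (which can block) and a prefetched "next" pointer would be stale once the lock is released. A generic sketch of the same pattern, using hypothetical names (my_item, destroy_item):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_item {
        struct list_head link;
};

/* Hypothetical teardown: may sleep, and must unlink the entry from the
 * list (as fc_rport_terminate() does), otherwise the loop never ends. */
void destroy_item(struct my_item *item);

static void purge_all(struct list_head *head, spinlock_t *lock)
{
        struct my_item *item;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        while (!list_empty(head)) {
                /* pop the head while locked, drop the lock for the
                 * blocking call, retake it before touching the list */
                item = list_entry(head->next, struct my_item, link);
                spin_unlock_irqrestore(lock, flags);
                destroy_item(item);
                spin_lock_irqsave(lock, flags);
        }
        spin_unlock_irqrestore(lock, flags);
}
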
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ad94367df43..fd56b7ec88b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2644,7 +2644,7 @@ static char *
sg_page_malloc(int rqSz, int lowDma, int *retSzp)
{
char *resp = NULL;
- int page_mask;
+ gfp_t page_mask;
int order, a_size;
int resSz = rqSz;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d001c046551..927d700f007 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3577,7 +3577,8 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
static struct st_buffer *
new_tape_buffer(int from_initialization, int need_dma, int max_sg)
{
- int i, priority, got = 0, segs = 0;
+ int i, got = 0, segs = 0;
+ gfp_t priority;
struct st_buffer *tb;
if (from_initialization)
@@ -3610,7 +3611,8 @@ static struct st_buffer *
/* Try to allocate enough space in the tape buffer */
static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
{
- int segs, nbr, max_segs, b_size, priority, order, got;
+ int segs, nbr, max_segs, b_size, order, got;
+ gfp_t priority;
if (new_size <= STbuffer->buffer_size)
return 1;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 0e21f583690..5c3c03932d6 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -152,6 +152,7 @@ static int __devinit pci_hp_diva_init(struct pci_dev *dev)
rc = 4;
break;
case PCI_DEVICE_ID_HP_DIVA_POWERBAR:
+ case PCI_DEVICE_ID_HP_DIVA_HURRICANE:
rc = 1;
break;
}
@@ -226,8 +227,10 @@ static int __devinit pci_plx9050_init(struct pci_dev *dev)
}
irq_config = 0x41;
- if (dev->vendor == PCI_VENDOR_ID_PANACOM)
+ if (dev->vendor == PCI_VENDOR_ID_PANACOM ||
+ dev->subsystem_vendor == PCI_SUBVENDOR_ID_EXSYS) {
irq_config = 0x43;
+ }
if ((dev->vendor == PCI_VENDOR_ID_PLX) &&
(dev->device == PCI_DEVICE_ID_PLX_ROMULUS)) {
/*
@@ -664,6 +667,15 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9050,
+ .subvendor = PCI_SUBVENDOR_ID_EXSYS,
+ .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055,
+ .init = pci_plx9050_init,
+ .setup = pci_default_setup,
+ .exit = __devexit_p(pci_plx9050_exit),
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_9050,
.subvendor = PCI_SUBVENDOR_ID_KEYSPAN,
.subdevice = PCI_SUBDEVICE_ID_KEYSPAN_SX2,
.init = pci_plx9050_init,
@@ -927,6 +939,7 @@ enum pci_board_num_t {
pbn_panacom,
pbn_panacom2,
pbn_panacom4,
+ pbn_exsys_4055,
pbn_plx_romulus,
pbn_oxsemi,
pbn_intel_i960,
@@ -1292,6 +1305,13 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.reg_shift = 7,
},
+ [pbn_exsys_4055] = {
+ .flags = FL_BASE2,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+
/* I think this entry is broken - the first_offset looks wrong --rmk */
[pbn_plx_romulus] = {
.flags = FL_BASE2,
@@ -1853,6 +1873,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_SUBVENDOR_ID_CHASE_PCIRAS,
PCI_SUBDEVICE_ID_CHASE_PCIRAS8, 0, 0,
pbn_b2_8_460800 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_EXSYS,
+ PCI_SUBDEVICE_ID_EXSYS_4055, 0, 0,
+ pbn_exsys_4055 },
/*
* Megawolf Romulus PCI Serial Card, from Mike Hudson
* (Exoray@isys.ca)
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index fc15b4acc8a..57e800ac3ce 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -106,7 +106,7 @@ void hcd_buffer_destroy (struct usb_hcd *hcd)
void *hcd_buffer_alloc (
struct usb_bus *bus,
size_t size,
- unsigned mem_flags,
+ gfp_t mem_flags,
dma_addr_t *dma
)
{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 1017a97a418..ff19d64041b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1112,7 +1112,7 @@ static void urb_unlink (struct urb *urb)
* expects usb_submit_urb() to have sanity checked and conditioned all
* inputs in the urb
*/
-static int hcd_submit_urb (struct urb *urb, unsigned mem_flags)
+static int hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
{
int status;
struct usb_hcd *hcd = urb->dev->bus->hcpriv;
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index ac451fa7e4d..1f1ed6211af 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -142,12 +142,12 @@ struct hcd_timeout { /* timeouts we allocate */
struct usb_operations {
int (*get_frame_number) (struct usb_device *usb_dev);
- int (*submit_urb) (struct urb *urb, unsigned mem_flags);
+ int (*submit_urb) (struct urb *urb, gfp_t mem_flags);
int (*unlink_urb) (struct urb *urb, int status);
/* allocate dma-consistent buffer for URB_DMA_NOMAPPING */
void *(*buffer_alloc)(struct usb_bus *bus, size_t size,
- unsigned mem_flags,
+ gfp_t mem_flags,
dma_addr_t *dma);
void (*buffer_free)(struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
@@ -200,7 +200,7 @@ struct hc_driver {
int (*urb_enqueue) (struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
- unsigned mem_flags);
+ gfp_t mem_flags);
int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb);
/* hw synch, freeing endpoint resources that urb_dequeue can't */
@@ -247,7 +247,7 @@ int hcd_buffer_create (struct usb_hcd *hcd);
void hcd_buffer_destroy (struct usb_hcd *hcd);
void *hcd_buffer_alloc (struct usb_bus *bus, size_t size,
- unsigned mem_flags, dma_addr_t *dma);
+ gfp_t mem_flags, dma_addr_t *dma);
void hcd_buffer_free (struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index f1fb67fe22a..f9a81e84dbd 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -321,7 +321,7 @@ int usb_sg_init (
struct scatterlist *sg,
int nents,
size_t length,
- unsigned mem_flags
+ gfp_t mem_flags
)
{
int i;
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index c846fefb738..b32898e0a27 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -60,7 +60,7 @@ void usb_init_urb(struct urb *urb)
*
* The driver must call usb_free_urb() when it is finished with the urb.
*/
-struct urb *usb_alloc_urb(int iso_packets, unsigned mem_flags)
+struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
struct urb *urb;
@@ -224,7 +224,7 @@ struct urb * usb_get_urb(struct urb *urb)
* GFP_NOIO, unless b) or c) apply
*
*/
-int usb_submit_urb(struct urb *urb, unsigned mem_flags)
+int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
int pipe, temp, max;
struct usb_device *dev;
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 7d131509e41..4c57f3f649e 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -1147,7 +1147,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
void *usb_buffer_alloc (
struct usb_device *dev,
size_t size,
- unsigned mem_flags,
+ gfp_t mem_flags,
dma_addr_t *dma
)
{
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 583db7c38cf..8d9d8ee8955 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -470,7 +470,7 @@ static int dummy_disable (struct usb_ep *_ep)
}
static struct usb_request *
-dummy_alloc_request (struct usb_ep *_ep, unsigned mem_flags)
+dummy_alloc_request (struct usb_ep *_ep, gfp_t mem_flags)
{
struct dummy_ep *ep;
struct dummy_request *req;
@@ -507,7 +507,7 @@ dummy_alloc_buffer (
struct usb_ep *_ep,
unsigned bytes,
dma_addr_t *dma,
- unsigned mem_flags
+ gfp_t mem_flags
) {
char *retval;
struct dummy_ep *ep;
@@ -541,7 +541,7 @@ fifo_complete (struct usb_ep *ep, struct usb_request *req)
static int
dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
- unsigned mem_flags)
+ gfp_t mem_flags)
{
struct dummy_ep *ep;
struct dummy_request *req;
@@ -999,7 +999,7 @@ static int dummy_urb_enqueue (
struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
- unsigned mem_flags
+ gfp_t mem_flags
) {
struct dummy *dum;
struct urbp *urbp;
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 49459e33e95..f1024e804d5 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -945,11 +945,11 @@ config_buf (enum usb_device_speed speed,
/*-------------------------------------------------------------------------*/
-static void eth_start (struct eth_dev *dev, unsigned gfp_flags);
-static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags);
+static void eth_start (struct eth_dev *dev, gfp_t gfp_flags);
+static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags);
static int
-set_ether_config (struct eth_dev *dev, unsigned gfp_flags)
+set_ether_config (struct eth_dev *dev, gfp_t gfp_flags)
{
int result = 0;
struct usb_gadget *gadget = dev->gadget;
@@ -1081,7 +1081,7 @@ static void eth_reset_config (struct eth_dev *dev)
* that returns config descriptors, and altsetting code.
*/
static int
-eth_set_config (struct eth_dev *dev, unsigned number, unsigned gfp_flags)
+eth_set_config (struct eth_dev *dev, unsigned number, gfp_t gfp_flags)
{
int result = 0;
struct usb_gadget *gadget = dev->gadget;
@@ -1598,7 +1598,7 @@ static void defer_kevent (struct eth_dev *dev, int flag)
static void rx_complete (struct usb_ep *ep, struct usb_request *req);
static int
-rx_submit (struct eth_dev *dev, struct usb_request *req, unsigned gfp_flags)
+rx_submit (struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
struct sk_buff *skb;
int retval = -ENOMEM;
@@ -1724,7 +1724,7 @@ clean:
}
static int prealloc (struct list_head *list, struct usb_ep *ep,
- unsigned n, unsigned gfp_flags)
+ unsigned n, gfp_t gfp_flags)
{
unsigned i;
struct usb_request *req;
@@ -1763,7 +1763,7 @@ extra:
return 0;
}
-static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags)
+static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
{
int status;
@@ -1779,7 +1779,7 @@ fail:
return status;
}
-static void rx_fill (struct eth_dev *dev, unsigned gfp_flags)
+static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
{
struct usb_request *req;
unsigned long flags;
@@ -1962,7 +1962,7 @@ drop:
* normally just one notification will be queued.
*/
-static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, unsigned);
+static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, gfp_t);
static void eth_req_free (struct usb_ep *ep, struct usb_request *req);
static void
@@ -2024,7 +2024,7 @@ static int rndis_control_ack (struct net_device *net)
#endif /* RNDIS */
-static void eth_start (struct eth_dev *dev, unsigned gfp_flags)
+static void eth_start (struct eth_dev *dev, gfp_t gfp_flags)
{
DEBUG (dev, "%s\n", __FUNCTION__);
@@ -2092,7 +2092,7 @@ static int eth_stop (struct net_device *net)
/*-------------------------------------------------------------------------*/
static struct usb_request *
-eth_req_alloc (struct usb_ep *ep, unsigned size, unsigned gfp_flags)
+eth_req_alloc (struct usb_ep *ep, unsigned size, gfp_t gfp_flags)
{
struct usb_request *req;
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index eaab26f4ed3..b0f3cd63e3b 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -269,7 +269,7 @@ static int goku_ep_disable(struct usb_ep *_ep)
/*-------------------------------------------------------------------------*/
static struct usb_request *
-goku_alloc_request(struct usb_ep *_ep, unsigned gfp_flags)
+goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
struct goku_request *req;
@@ -327,7 +327,7 @@ goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
*/
static void *
goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
- dma_addr_t *dma, unsigned gfp_flags)
+ dma_addr_t *dma, gfp_t gfp_flags)
{
void *retval;
struct goku_ep *ep;
@@ -789,7 +789,7 @@ finished:
/*-------------------------------------------------------------------------*/
static int
-goku_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
+goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct goku_request *req;
struct goku_ep *ep;
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
index 4842577789c..012d1e5f152 100644
--- a/drivers/usb/gadget/lh7a40x_udc.c
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -71,13 +71,13 @@ static char *state_names[] = {
static int lh7a40x_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *);
static int lh7a40x_ep_disable(struct usb_ep *ep);
-static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, int);
+static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, gfp_t);
static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *);
static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned, dma_addr_t *,
- int);
+ gfp_t);
static void lh7a40x_free_buffer(struct usb_ep *ep, void *, dma_addr_t,
unsigned);
-static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, int);
+static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, gfp_t);
static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
static int lh7a40x_set_halt(struct usb_ep *ep, int);
static int lh7a40x_fifo_status(struct usb_ep *ep);
@@ -1106,7 +1106,7 @@ static int lh7a40x_ep_disable(struct usb_ep *_ep)
}
static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep,
- unsigned gfp_flags)
+ gfp_t gfp_flags)
{
struct lh7a40x_request *req;
@@ -1134,7 +1134,7 @@ static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req)
}
static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned bytes,
- dma_addr_t * dma, unsigned gfp_flags)
+ dma_addr_t * dma, gfp_t gfp_flags)
{
char *retval;
@@ -1158,7 +1158,7 @@ static void lh7a40x_free_buffer(struct usb_ep *ep, void *buf, dma_addr_t dma,
* NOTE: Sets INDEX register
*/
static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req,
- unsigned gfp_flags)
+ gfp_t gfp_flags)
{
struct lh7a40x_request *req;
struct lh7a40x_ep *ep;
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 477fab2e74d..c32e1f7476d 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -376,7 +376,7 @@ static int net2280_disable (struct usb_ep *_ep)
/*-------------------------------------------------------------------------*/
static struct usb_request *
-net2280_alloc_request (struct usb_ep *_ep, unsigned gfp_flags)
+net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
{
struct net2280_ep *ep;
struct net2280_request *req;
@@ -463,7 +463,7 @@ net2280_alloc_buffer (
struct usb_ep *_ep,
unsigned bytes,
dma_addr_t *dma,
- unsigned gfp_flags
+ gfp_t gfp_flags
)
{
void *retval;
@@ -897,7 +897,7 @@ done (struct net2280_ep *ep, struct net2280_request *req, int status)
/*-------------------------------------------------------------------------*/
static int
-net2280_queue (struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
+net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct net2280_request *req;
struct net2280_ep *ep;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index ff5533e6956..287c5900fb1 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -269,7 +269,7 @@ static int omap_ep_disable(struct usb_ep *_ep)
/*-------------------------------------------------------------------------*/
static struct usb_request *
-omap_alloc_request(struct usb_ep *ep, unsigned gfp_flags)
+omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
struct omap_req *req;
@@ -298,7 +298,7 @@ omap_alloc_buffer(
struct usb_ep *_ep,
unsigned bytes,
dma_addr_t *dma,
- unsigned gfp_flags
+ gfp_t gfp_flags
)
{
void *retval;
@@ -937,7 +937,7 @@ static void dma_channel_release(struct omap_ep *ep)
/*-------------------------------------------------------------------------*/
static int
-omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
+omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
struct omap_req *req = container_of(_req, struct omap_req, req);
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 73f8c940415..6e545393cff 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -332,7 +332,7 @@ static int pxa2xx_ep_disable (struct usb_ep *_ep)
* pxa2xx_ep_alloc_request - allocate a request data structure
*/
static struct usb_request *
-pxa2xx_ep_alloc_request (struct usb_ep *_ep, unsigned gfp_flags)
+pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
{
struct pxa2xx_request *req;
@@ -367,7 +367,7 @@ pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
*/
static void *
pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
- dma_addr_t *dma, unsigned gfp_flags)
+ dma_addr_t *dma, gfp_t gfp_flags)
{
char *retval;
@@ -874,7 +874,7 @@ done:
/*-------------------------------------------------------------------------*/
static int
-pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags)
+pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct pxa2xx_request *req;
struct pxa2xx_ep *ep;
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index c925d9222f5..b35ac6d334f 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -300,18 +300,18 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
u8 type, unsigned int index, int is_otg);
static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len,
- unsigned kmalloc_flags);
+ gfp_t kmalloc_flags);
static void gs_free_req(struct usb_ep *ep, struct usb_request *req);
static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len,
- unsigned kmalloc_flags);
+ gfp_t kmalloc_flags);
static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req);
-static int gs_alloc_ports(struct gs_dev *dev, unsigned kmalloc_flags);
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags);
static void gs_free_ports(struct gs_dev *dev);
/* circular buffer */
-static struct gs_buf *gs_buf_alloc(unsigned int size, unsigned kmalloc_flags);
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags);
static void gs_buf_free(struct gs_buf *gb);
static void gs_buf_clear(struct gs_buf *gb);
static unsigned int gs_buf_data_avail(struct gs_buf *gb);
@@ -2091,7 +2091,7 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed,
* usb_request or NULL if there is an error.
*/
static struct usb_request *
-gs_alloc_req(struct usb_ep *ep, unsigned int len, unsigned kmalloc_flags)
+gs_alloc_req(struct usb_ep *ep, unsigned int len, gfp_t kmalloc_flags)
{
struct usb_request *req;
@@ -2132,7 +2132,7 @@ static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
* endpoint, buffer len, and kmalloc flags.
*/
static struct gs_req_entry *
-gs_alloc_req_entry(struct usb_ep *ep, unsigned len, unsigned kmalloc_flags)
+gs_alloc_req_entry(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
struct gs_req_entry *req;
@@ -2173,7 +2173,7 @@ static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req)
*
* The device lock is normally held when calling this function.
*/
-static int gs_alloc_ports(struct gs_dev *dev, unsigned kmalloc_flags)
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags)
{
int i;
struct gs_port *port;
@@ -2255,7 +2255,7 @@ static void gs_free_ports(struct gs_dev *dev)
*
* Allocate a circular buffer and all associated memory.
*/
-static struct gs_buf *gs_buf_alloc(unsigned int size, unsigned kmalloc_flags)
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
{
struct gs_buf *gb;
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 6890e773b2a..ec9c424f1d9 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -612,7 +612,7 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req)
}
static struct usb_request *
-source_sink_start_ep (struct usb_ep *ep, unsigned gfp_flags)
+source_sink_start_ep (struct usb_ep *ep, gfp_t gfp_flags)
{
struct usb_request *req;
int status;
@@ -640,7 +640,7 @@ source_sink_start_ep (struct usb_ep *ep, unsigned gfp_flags)
}
static int
-set_source_sink_config (struct zero_dev *dev, unsigned gfp_flags)
+set_source_sink_config (struct zero_dev *dev, gfp_t gfp_flags)
{
int result = 0;
struct usb_ep *ep;
@@ -744,7 +744,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req)
}
static int
-set_loopback_config (struct zero_dev *dev, unsigned gfp_flags)
+set_loopback_config (struct zero_dev *dev, gfp_t gfp_flags)
{
int result = 0;
struct usb_ep *ep;
@@ -845,7 +845,7 @@ static void zero_reset_config (struct zero_dev *dev)
* by limiting configuration choices (like the pxa2xx).
*/
static int
-zero_set_config (struct zero_dev *dev, unsigned number, unsigned gfp_flags)
+zero_set_config (struct zero_dev *dev, unsigned number, gfp_t gfp_flags)
{
int result = 0;
struct usb_gadget *gadget = dev->gadget;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b948ffd94f4..f5eb9e7b5b1 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -983,7 +983,7 @@ static int ehci_urb_enqueue (
struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
- unsigned mem_flags
+ gfp_t mem_flags
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct list_head qtd_list;
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 5c38ad86948..91c2ab43cbc 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -45,7 +45,7 @@ static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma)
INIT_LIST_HEAD (&qtd->qtd_list);
}
-static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags)
+static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
struct ehci_qtd *qtd;
dma_addr_t dma;
@@ -79,7 +79,7 @@ static void qh_destroy (struct kref *kref)
dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
}
-static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags)
+static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
struct ehci_qh *qh;
dma_addr_t dma;
@@ -161,7 +161,7 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci)
}
/* remember to add cleanup code (above) if you add anything here */
-static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
+static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
int i;
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 940d38ca7d9..5bb872c3496 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -477,7 +477,7 @@ qh_urb_transaction (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *head,
- int flags
+ gfp_t flags
) {
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
@@ -629,7 +629,7 @@ static struct ehci_qh *
qh_make (
struct ehci_hcd *ehci,
struct urb *urb,
- int flags
+ gfp_t flags
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
u32 info1 = 0, info2 = 0;
@@ -906,7 +906,7 @@ submit_async (
struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
- unsigned mem_flags
+ gfp_t mem_flags
) {
struct ehci_qtd *qtd;
int epnum;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index ccc7300baa6..f0c8aa1ccd5 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -589,7 +589,7 @@ static int intr_submit (
struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
- unsigned mem_flags
+ gfp_t mem_flags
) {
unsigned epnum;
unsigned long flags;
@@ -634,7 +634,7 @@ done:
/* ehci_iso_stream ops work with both ITD and SITD */
static struct ehci_iso_stream *
-iso_stream_alloc (unsigned mem_flags)
+iso_stream_alloc (gfp_t mem_flags)
{
struct ehci_iso_stream *stream;
@@ -851,7 +851,7 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
/* ehci_iso_sched ops can be ITD-only or SITD-only */
static struct ehci_iso_sched *
-iso_sched_alloc (unsigned packets, unsigned mem_flags)
+iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
struct ehci_iso_sched *iso_sched;
int size = sizeof *iso_sched;
@@ -924,7 +924,7 @@ itd_urb_transaction (
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
- unsigned mem_flags
+ gfp_t mem_flags
)
{
struct ehci_itd *itd;
@@ -1418,7 +1418,7 @@ itd_complete (
/*-------------------------------------------------------------------------*/
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
- unsigned mem_flags)
+ gfp_t mem_flags)
{
int status = -EINVAL;
unsigned long flags;
@@ -1529,7 +1529,7 @@ sitd_urb_transaction (
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
- unsigned mem_flags
+ gfp_t mem_flags
)
{
struct ehci_sitd *sitd;
@@ -1779,7 +1779,7 @@ sitd_complete (
static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
- unsigned mem_flags)
+ gfp_t mem_flags)
{
int status = -EINVAL;
unsigned long flags;
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index e142056b0d2..2548d94fcd7 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -694,7 +694,7 @@ static int balance(struct isp116x *isp116x, u16 period, u16 load)
static int isp116x_urb_enqueue(struct usb_hcd *hcd,
struct usb_host_endpoint *hep, struct urb *urb,
- unsigned mem_flags)
+ gfp_t mem_flags)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
struct usb_device *udev = urb->dev;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 67c1aa5eb1c..f8da8c7af7c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -180,7 +180,7 @@ static int ohci_urb_enqueue (
struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
- unsigned mem_flags
+ gfp_t mem_flags
) {
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
struct ed *ed;
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index fd3c4d3714b..9fb83dfb1eb 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -84,7 +84,7 @@ dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
/* TDs ... */
static struct td *
-td_alloc (struct ohci_hcd *hc, unsigned mem_flags)
+td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
{
dma_addr_t dma;
struct td *td;
@@ -118,7 +118,7 @@ td_free (struct ohci_hcd *hc, struct td *td)
/* EDs ... */
static struct ed *
-ed_alloc (struct ohci_hcd *hc, unsigned mem_flags)
+ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
{
dma_addr_t dma;
struct ed *ed;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index d42a15d10a4..cad858575ce 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -818,7 +818,7 @@ static int sl811h_urb_enqueue(
struct usb_hcd *hcd,
struct usb_host_endpoint *hep,
struct urb *urb,
- unsigned mem_flags
+ gfp_t mem_flags
) {
struct sl811 *sl811 = hcd_to_sl811(hcd);
struct usb_device *udev = urb->dev;
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index ea0d168a8c6..4e0fbe2c1a9 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1164,7 +1164,7 @@ static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
static int uhci_urb_enqueue(struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
- struct urb *urb, unsigned mem_flags)
+ struct urb *urb, gfp_t mem_flags)
{
int ret;
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 03fb70ef2eb..0592cb5e6c4 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -137,7 +137,7 @@ static void async_complete(struct urb *urb, struct pt_regs *ptregs)
static struct uss720_async_request *submit_async_request(struct parport_uss720_private *priv,
__u8 request, __u8 requesttype, __u16 value, __u16 index,
- unsigned int mem_flags)
+ gfp_t mem_flags)
{
struct usb_device *usbdev;
struct uss720_async_request *rq;
@@ -204,7 +204,7 @@ static unsigned int kill_all_async_requests_priv(struct parport_uss720_private *
/* --------------------------------------------------------------------- */
-static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, unsigned int mem_flags)
+static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, gfp_t mem_flags)
{
struct parport_uss720_private *priv;
struct uss720_async_request *rq;
@@ -238,7 +238,7 @@ static int get_1284_register(struct parport *pp, unsigned char reg, unsigned cha
return -EIO;
}
-static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, unsigned int mem_flags)
+static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, gfp_t mem_flags)
{
struct parport_uss720_private *priv;
struct uss720_async_request *rq;
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c
index 861f00a4375..252a34fbb42 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/usb/net/asix.c
@@ -753,7 +753,7 @@ static int ax88772_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
static struct sk_buff *ax88772_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
- unsigned flags)
+ gfp_t flags)
{
int padlen;
int headroom = skb_headroom(skb);
diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c
index c8763ae33c7..c0f263b202a 100644
--- a/drivers/usb/net/gl620a.c
+++ b/drivers/usb/net/gl620a.c
@@ -301,7 +301,7 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
static struct sk_buff *
-genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags)
+genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
int padlen;
int length = skb->len;
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index e04b0ce3611..c82655d3d44 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -477,13 +477,13 @@ static int kaweth_reset(struct kaweth_device *kaweth)
}
static void kaweth_usb_receive(struct urb *, struct pt_regs *regs);
-static int kaweth_resubmit_rx_urb(struct kaweth_device *, unsigned);
+static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t);
/****************************************************************
int_callback
*****************************************************************/
-static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, int mf)
+static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf)
{
int status;
@@ -550,7 +550,7 @@ static void kaweth_resubmit_tl(void *d)
* kaweth_resubmit_rx_urb
****************************************************************/
static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth,
- unsigned mem_flags)
+ gfp_t mem_flags)
{
int result;
diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c
index a4309c4a491..cee55f8cf64 100644
--- a/drivers/usb/net/net1080.c
+++ b/drivers/usb/net/net1080.c
@@ -500,7 +500,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
static struct sk_buff *
-net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags)
+net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
int padlen;
struct sk_buff *skb2;
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index 2ed2e5fb777..b5a925dc1be 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -517,7 +517,7 @@ static int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
static struct sk_buff *
-rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags)
+rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
struct rndis_data_hdr *hdr;
struct sk_buff *skb2;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 6c460918d54..fce81d73893 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
static void rx_complete (struct urb *urb, struct pt_regs *regs);
-static void rx_submit (struct usbnet *dev, struct urb *urb, unsigned flags)
+static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
struct sk_buff *skb;
struct skb_data *entry;
diff --git a/drivers/usb/net/usbnet.h b/drivers/usb/net/usbnet.h
index 7aa0abd1a9b..89fc4958eec 100644
--- a/drivers/usb/net/usbnet.h
+++ b/drivers/usb/net/usbnet.h
@@ -107,7 +107,7 @@ struct driver_info {
/* fixup tx packet (add framing) */
struct sk_buff *(*tx_fixup)(struct usbnet *dev,
- struct sk_buff *skb, unsigned flags);
+ struct sk_buff *skb, gfp_t flags);
/* for new devices, use the descriptor-reading code instead */
int in; /* rx endpoint */
diff --git a/drivers/usb/net/zaurus.c b/drivers/usb/net/zaurus.c
index ee3b892aeab..5d4b7d55b09 100644
--- a/drivers/usb/net/zaurus.c
+++ b/drivers/usb/net/zaurus.c
@@ -62,7 +62,7 @@
*/
static struct sk_buff *
-zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags)
+zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
int padlen;
struct sk_buff *skb2;
diff --git a/drivers/usb/net/zd1201.c b/drivers/usb/net/zd1201.c
index c4e479ee926..2f52261c7cc 100644
--- a/drivers/usb/net/zd1201.c
+++ b/drivers/usb/net/zd1201.c
@@ -521,7 +521,7 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int
int reqlen;
char seq=0;
struct urb *urb;
- unsigned int gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;
+ gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;
len += 4; /* first 4 are for header */
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 1147b899f00..007c8e9b2b3 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -242,6 +242,13 @@ static ssize_t show_virtual(struct class_device *class_device, char *buf)
fb_info->var.yres_virtual);
}
+static ssize_t show_stride(struct class_device *class_device, char *buf)
+{
+ struct fb_info *fb_info =
+ (struct fb_info *)class_get_devdata(class_device);
+ return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->fix.line_length);
+}
+
/* Format for cmap is "%02x%c%4x%4x%4x\n" */
/* %02x entry %c transp %4x red %4x blue %4x green \n */
/* 256 rows at 16 chars equals 4096, the normal page size */
@@ -432,6 +439,7 @@ static struct class_device_attribute class_device_attrs[] = {
__ATTR(pan, S_IRUGO|S_IWUSR, show_pan, store_pan),
__ATTR(virtual_size, S_IRUGO|S_IWUSR, show_virtual, store_virtual),
__ATTR(name, S_IRUGO, show_name, NULL),
+ __ATTR(stride, S_IRUGO, show_stride, NULL),
};
int fb_init_class_device(struct fb_info *fb_info)
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 23c12512802..0d576987ec6 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -29,7 +29,7 @@ static int afs_file_release(struct inode *inode, struct file *file);
static int afs_file_readpage(struct file *file, struct page *page);
static int afs_file_invalidatepage(struct page *page, unsigned long offset);
-static int afs_file_releasepage(struct page *page, int gfp_flags);
+static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
static ssize_t afs_file_write(struct file *file, const char __user *buf,
size_t size, loff_t *off);
@@ -279,7 +279,7 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset)
/*
* release a page and cleanup its private data
*/
-static int afs_file_releasepage(struct page *page, int gfp_flags)
+static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
{
struct cachefs_page *pageio;
diff --git a/fs/aio.c b/fs/aio.c
index 9fe7216457d..edfca5b7553 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1397,6 +1397,9 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
kiocb->ki_left)))
break;
+ ret = security_file_permission(file, MAY_READ);
+ if (unlikely(ret))
+ break;
ret = -EINVAL;
if (file->f_op->aio_read)
kiocb->ki_retry = aio_pread;
@@ -1409,6 +1412,9 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
kiocb->ki_left)))
break;
+ ret = security_file_permission(file, MAY_WRITE);
+ if (unlikely(ret))
+ break;
ret = -EINVAL;
if (file->f_op->aio_write)
kiocb->ki_retry = aio_pwrite;
diff --git a/fs/bio.c b/fs/bio.c
index 7d81a93afd4..460554b07ff 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -778,7 +778,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
static struct bio *__bio_map_kern(request_queue_t *q, void *data,
- unsigned int len, unsigned int gfp_mask)
+ unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -825,7 +825,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
* device. Returns an error pointer in case of error.
*/
struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
- unsigned int gfp_mask)
+ gfp_t gfp_mask)
{
struct bio *bio;
diff --git a/fs/buffer.c b/fs/buffer.c
index 1216c0d3c8c..b1667986442 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -502,7 +502,7 @@ static void free_more_memory(void)
yield();
for_each_pgdat(pgdat) {
- zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
+ zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
try_to_free_pages(zones, GFP_NOFS);
}
@@ -1571,7 +1571,7 @@ static inline void discard_buffer(struct buffer_head * bh)
*
* NOTE: @gfp_mask may go away, and this function may become non-blocking.
*/
-int try_to_release_page(struct page *page, int gfp_mask)
+int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
struct address_space * const mapping = page->mapping;
diff --git a/fs/dcache.c b/fs/dcache.c
index fb10386c59b..e90512ed35a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -689,7 +689,7 @@ void shrink_dcache_anon(struct hlist_head *head)
*
* In this case we return -1 to tell the caller that we baled.
*/
-static int shrink_dcache_memory(int nr, unsigned int gfp_mask)
+static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{
if (nr) {
if (!(gfp_mask & __GFP_FS))
diff --git a/fs/dquot.c b/fs/dquot.c
index b9732335bcd..05f3327d64a 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -500,7 +500,7 @@ static void prune_dqcache(int count)
* more memory
*/
-static int shrink_dqcache_memory(int nr, unsigned int gfp_mask)
+static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
{
if (nr) {
spin_lock(&dq_list_lock);
diff --git a/fs/exec.c b/fs/exec.c
index a04a575ad43..d2208f7c87d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -126,8 +126,7 @@ asmlinkage long sys_uselib(const char __user * library)
struct nameidata nd;
int error;
- nd.intent.open.flags = FMODE_READ;
- error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+ error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ);
if (error)
goto out;
@@ -139,7 +138,7 @@ asmlinkage long sys_uselib(const char __user * library)
if (error)
goto exit;
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ file = nameidata_to_filp(&nd, O_RDONLY);
error = PTR_ERR(file);
if (IS_ERR(file))
goto out;
@@ -167,6 +166,7 @@ asmlinkage long sys_uselib(const char __user * library)
out:
return error;
exit:
+ release_open_intent(&nd);
path_release(&nd);
goto out;
}
@@ -490,8 +490,7 @@ struct file *open_exec(const char *name)
int err;
struct file *file;
- nd.intent.open.flags = FMODE_READ;
- err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+ err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ);
file = ERR_PTR(err);
if (!err) {
@@ -504,7 +503,7 @@ struct file *open_exec(const char *name)
err = -EACCES;
file = ERR_PTR(err);
if (!err) {
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ file = nameidata_to_filp(&nd, O_RDONLY);
if (!IS_ERR(file)) {
err = deny_write_access(file);
if (err) {
@@ -516,6 +515,7 @@ out:
return file;
}
}
+ release_open_intent(&nd);
path_release(&nd);
}
goto out;
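
Both exec.c call sites above now perform the lookup with an attached open intent and convert the result with nameidata_to_filp(); the preallocated intent file is released explicitly (release_open_intent() plus path_release()) only on the error paths taken before that conversion. A condensed sketch of the same calling convention, built from the helpers this diff adds in fs/namei.c further down, wrapped in a hypothetical open_for_read():

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/err.h>

static struct file *open_for_read(const char *name)
{
        struct nameidata nd;
        int err;

        err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ);
        if (err)
                return ERR_PTR(err);

        err = permission(nd.dentry->d_inode, MAY_READ, &nd);
        if (err) {
                /* bailing out before nameidata_to_filp(): drop the
                 * intent file and the path references ourselves */
                release_open_intent(&nd);
                path_release(&nd);
                return ERR_PTR(err);
        }

        /* as in open_exec() above, no further release after this call */
        return nameidata_to_filp(&nd, O_RDONLY);
}
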
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index b5177c90d6f..8b38f223279 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1434,7 +1434,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset)
return journal_invalidatepage(journal, page, offset);
}
-static int ext3_releasepage(struct page *page, int wait)
+static int ext3_releasepage(struct page *page, gfp_t wait)
{
journal_t *journal = EXT3_JOURNAL(page->mapping->host);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index f1570b9f9de..3f680c5675b 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -46,7 +46,7 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfs_get_block);
}
-static int hfs_releasepage(struct page *page, int mask)
+static int hfs_releasepage(struct page *page, gfp_t mask)
{
struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb;
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index d5642705f63..f205773ddfb 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -40,7 +40,7 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfsplus_get_block);
}
-static int hfsplus_releasepage(struct page *page, int mask)
+static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index fd0f0f050e1..452fc1fdbd3 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -50,6 +50,7 @@ static void hfsplus_read_inode(struct inode *inode)
init_MUTEX(&HFSPLUS_I(inode).extents_lock);
HFSPLUS_I(inode).flags = 0;
HFSPLUS_I(inode).rsrc_inode = NULL;
+ atomic_set(&HFSPLUS_I(inode).opencnt, 0);
if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) {
read_inode:
diff --git a/fs/inode.c b/fs/inode.c
index f80a79ff156..7d331652776 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -475,7 +475,7 @@ static void prune_icache(int nr_to_scan)
* This function is passed the number of inodes to scan, and it returns the
* total number of remaining possibly-reclaimable inodes.
*/
-static int shrink_icache_memory(int nr, unsigned int gfp_mask)
+static int shrink_icache_memory(int nr, gfp_t gfp_mask)
{
if (nr) {
/*
diff --git a/fs/inotify.c b/fs/inotify.c
index a37e9fb1da5..9fbaebfdf40 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -176,6 +176,7 @@ static inline void put_inotify_dev(struct inotify_device *dev)
if (atomic_dec_and_test(&dev->count)) {
atomic_dec(&dev->user->inotify_devs);
free_uid(dev->user);
+ idr_destroy(&dev->idr);
kfree(dev);
}
}
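
The one-line inotify change above calls idr_destroy() when the last reference to the device drops: removing every allocated id with idr_remove() returns the slots, but the idr keeps internally cached layer structures until idr_destroy() frees them, so skipping it leaks that cache. A rough lifecycle sketch with a hypothetical registry:

#include <linux/idr.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static struct idr my_registry;          /* hypothetical id -> object map */

static void init_registry(void)
{
        idr_init(&my_registry);
}

static int register_obj(void *obj)
{
        int id, err;

        if (!idr_pre_get(&my_registry, GFP_KERNEL))
                return -ENOMEM;
        err = idr_get_new(&my_registry, obj, &id);
        return err ? err : id;
}

static void shutdown_registry(void)
{
        /* ids are assumed to have been idr_remove()d already; this only
         * frees the layer cache the idr kept around for reuse */
        idr_destroy(&my_registry);
}
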
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 7ae2c4fe506..e4b516ac498 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1606,7 +1606,7 @@ int journal_blocks_per_page(struct inode *inode)
* Simple support for retrying memory allocations. Introduced to help to
* debug different VM deadlock avoidance strategies.
*/
-void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry)
+void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
{
return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 49bbc2be3d7..13cb05bf604 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1621,7 +1621,7 @@ out:
* while the data is part of a transaction. Yes?
*/
int journal_try_to_free_buffers(journal_t *journal,
- struct page *page, int unused_gfp_mask)
+ struct page *page, gfp_t unused_gfp_mask)
{
struct buffer_head *head;
struct buffer_head *bh;
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 13d7e3f1feb..eeb37d70e65 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -198,7 +198,7 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
}
}
-static inline struct metapage *alloc_metapage(unsigned int gfp_mask)
+static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
return mempool_alloc(metapage_mempool, gfp_mask);
}
@@ -534,7 +534,7 @@ add_failed:
return -EIO;
}
-static int metapage_releasepage(struct page *page, int gfp_mask)
+static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
struct metapage *mp;
int busy = 0;
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 82c77df81c5..c4c8601096e 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -173,11 +173,10 @@ nlm_bind_host(struct nlm_host *host)
/* If we've already created an RPC client, check whether
* RPC rebind is required
- * Note: why keep rebinding if we're on a tcp connection?
*/
if ((clnt = host->h_rpcclnt) != NULL) {
xprt = clnt->cl_xprt;
- if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
+ if (time_after_eq(jiffies, host->h_nextrebind)) {
clnt->cl_port = 0;
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
dprintk("lockd: next rebind in %ld jiffies\n",
@@ -189,7 +188,6 @@ nlm_bind_host(struct nlm_host *host)
goto forgetit;
xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
- xprt->nocong = 1; /* No congestion control for NLM */
xprt->resvport = 1; /* NLM requires a reserved port */
/* Existing NLM servers accept AUTH_UNIX only */
diff --git a/fs/locks.c b/fs/locks.c
index f7daa5f4894..a1e8b224801 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -316,21 +316,22 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
/* POSIX-1996 leaves the case l->l_len < 0 undefined;
POSIX-2001 defines it. */
start += l->l_start;
- end = start + l->l_len - 1;
- if (l->l_len < 0) {
+ if (start < 0)
+ return -EINVAL;
+ fl->fl_end = OFFSET_MAX;
+ if (l->l_len > 0) {
+ end = start + l->l_len - 1;
+ fl->fl_end = end;
+ } else if (l->l_len < 0) {
end = start - 1;
+ fl->fl_end = end;
start += l->l_len;
+ if (start < 0)
+ return -EINVAL;
}
-
- if (start < 0)
- return -EINVAL;
- if (l->l_len > 0 && end < 0)
- return -EOVERFLOW;
-
fl->fl_start = start; /* we record the absolute position */
- fl->fl_end = end;
- if (l->l_len == 0)
- fl->fl_end = OFFSET_MAX;
+ if (fl->fl_end < fl->fl_start)
+ return -EOVERFLOW;
fl->fl_owner = current->files;
fl->fl_pid = current->tgid;
@@ -362,14 +363,21 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
return -EINVAL;
}
- if (((start += l->l_start) < 0) || (l->l_len < 0))
+ start += l->l_start;
+ if (start < 0)
return -EINVAL;
- fl->fl_end = start + l->l_len - 1;
- if (l->l_len > 0 && fl->fl_end < 0)
- return -EOVERFLOW;
+ fl->fl_end = OFFSET_MAX;
+ if (l->l_len > 0) {
+ fl->fl_end = start + l->l_len - 1;
+ } else if (l->l_len < 0) {
+ fl->fl_end = start - 1;
+ start += l->l_len;
+ if (start < 0)
+ return -EINVAL;
+ }
fl->fl_start = start; /* we record the absolute position */
- if (l->l_len == 0)
- fl->fl_end = OFFSET_MAX;
+ if (fl->fl_end < fl->fl_start)
+ return -EOVERFLOW;
fl->fl_owner = current->files;
fl->fl_pid = current->tgid;
@@ -829,12 +837,16 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
/* Detect adjacent or overlapping regions (if same lock type)
*/
if (request->fl_type == fl->fl_type) {
+ /* In all comparisons of start vs end, use
+ * "start - 1" rather than "end + 1". If end
+ * is OFFSET_MAX, end + 1 will become negative.
+ */
if (fl->fl_end < request->fl_start - 1)
goto next_lock;
/* If the next lock in the list has entirely bigger
* addresses than the new one, insert the lock here.
*/
- if (fl->fl_start > request->fl_end + 1)
+ if (fl->fl_start - 1 > request->fl_end)
break;
/* If we come here, the new and old lock are of the
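
The fs/locks.c hunks above rewrite the range setup so that fl_end is computed only for a non-zero l_len (leaving OFFSET_MAX for "to end of file"), and the overlap test in __posix_lock_file() now subtracts one from the start instead of adding one to the end: once a lock's fl_end is OFFSET_MAX, "end + 1" wraps to a negative loff_t and the old comparison misfires, while "start - 1" stays safe because start has already been checked to be non-negative. An illustrative (non-kernel) form of the adjacency test:

#include <linux/types.h>

/* true if [s1,e1] and [s2,e2] overlap or touch; written so that no
 * "end + 1" is ever computed, which would overflow for end == OFFSET_MAX */
static int ranges_adjacent_or_overlapping(loff_t s1, loff_t e1,
                                          loff_t s2, loff_t e2)
{
        return !(e1 < s2 - 1 || e2 < s1 - 1);
}
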
diff --git a/fs/mbcache.c b/fs/mbcache.c
index b002a088857..298997f1747 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -116,7 +116,7 @@ mb_cache_indexes(struct mb_cache *cache)
* What the mbcache registers as to get shrunk dynamically.
*/
-static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);
+static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
static inline int
@@ -140,7 +140,7 @@ __mb_cache_entry_unhash(struct mb_cache_entry *ce)
static inline void
-__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
+__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
struct mb_cache *cache = ce->e_cache;
@@ -193,7 +193,7 @@ forget:
* Returns the number of objects which are present in the cache.
*/
static int
-mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
+mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
diff --git a/fs/namei.c b/fs/namei.c
index aa62dbda93a..aaaa8103623 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -28,6 +28,7 @@
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
+#include <linux/file.h>
#include <asm/namei.h>
#include <asm/uaccess.h>
@@ -317,6 +318,18 @@ void path_release_on_umount(struct nameidata *nd)
mntput_no_expire(nd->mnt);
}
+/**
+ * release_open_intent - free up open intent resources
+ * @nd: pointer to nameidata
+ */
+void release_open_intent(struct nameidata *nd)
+{
+ if (nd->intent.open.file->f_dentry == NULL)
+ put_filp(nd->intent.open.file);
+ else
+ fput(nd->intent.open.file);
+}
+
/*
* Internal lookup() using the new generic dcache.
* SMP-safe
@@ -750,6 +763,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
struct qstr this;
unsigned int c;
+ nd->flags |= LOOKUP_CONTINUE;
err = exec_permission_lite(inode, nd);
if (err == -EAGAIN) {
err = permission(inode, MAY_EXEC, nd);
@@ -802,7 +816,6 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
if (err < 0)
break;
}
- nd->flags |= LOOKUP_CONTINUE;
/* This does the actual lookups.. */
err = do_lookup(nd, &this, &next);
if (err)
@@ -1052,6 +1065,70 @@ out:
return retval;
}
+static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags,
+ struct nameidata *nd, int open_flags, int create_mode)
+{
+ struct file *filp = get_empty_filp();
+ int err;
+
+ if (filp == NULL)
+ return -ENFILE;
+ nd->intent.open.file = filp;
+ nd->intent.open.flags = open_flags;
+ nd->intent.open.create_mode = create_mode;
+ err = path_lookup(name, lookup_flags|LOOKUP_OPEN, nd);
+ if (IS_ERR(nd->intent.open.file)) {
+ if (err == 0) {
+ err = PTR_ERR(nd->intent.open.file);
+ path_release(nd);
+ }
+ } else if (err != 0)
+ release_open_intent(nd);
+ return err;
+}
+
+/**
+ * path_lookup_open - lookup a file path with open intent
+ * @name: pointer to file name
+ * @lookup_flags: lookup intent flags
+ * @nd: pointer to nameidata
+ * @open_flags: open intent flags
+ */
+int path_lookup_open(const char *name, unsigned int lookup_flags,
+ struct nameidata *nd, int open_flags)
+{
+ return __path_lookup_intent_open(name, lookup_flags, nd,
+ open_flags, 0);
+}
+
+/**
+ * path_lookup_create - lookup a file path with open + create intent
+ * @name: pointer to file name
+ * @lookup_flags: lookup intent flags
+ * @nd: pointer to nameidata
+ * @open_flags: open intent flags
+ * @create_mode: create intent flags
+ */
+int path_lookup_create(const char *name, unsigned int lookup_flags,
+ struct nameidata *nd, int open_flags, int create_mode)
+{
+ return __path_lookup_intent_open(name, lookup_flags|LOOKUP_CREATE, nd,
+ open_flags, create_mode);
+}
+
+int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags,
+ struct nameidata *nd, int open_flags)
+{
+ char *tmp = getname(name);
+ int err = PTR_ERR(tmp);
+
+ if (!IS_ERR(tmp)) {
+ err = __path_lookup_intent_open(tmp, lookup_flags, nd, open_flags, 0);
+ putname(tmp);
+ }
+ return err;
+}
+
/*
* Restricted form of lookup. Doesn't follow links, single-component only,
* needs parent already locked. Doesn't follow mounts.
@@ -1416,27 +1493,27 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
*/
int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
{
- int acc_mode, error = 0;
+ int acc_mode, error;
struct path path;
struct dentry *dir;
int count = 0;
acc_mode = ACC_MODE(flag);
+ /* O_TRUNC implies we need access checks for write permissions */
+ if (flag & O_TRUNC)
+ acc_mode |= MAY_WRITE;
+
/* Allow the LSM permission hook to distinguish append
access from general write access. */
if (flag & O_APPEND)
acc_mode |= MAY_APPEND;
- /* Fill in the open() intent data */
- nd->intent.open.flags = flag;
- nd->intent.open.create_mode = mode;
-
/*
* The simplest case - just a plain lookup.
*/
if (!(flag & O_CREAT)) {
- error = path_lookup(pathname, lookup_flags(flag)|LOOKUP_OPEN, nd);
+ error = path_lookup_open(pathname, lookup_flags(flag), nd, flag);
if (error)
return error;
goto ok;
@@ -1445,7 +1522,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
/*
* Create - we need to know the parent.
*/
- error = path_lookup(pathname, LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE, nd);
+ error = path_lookup_create(pathname, LOOKUP_PARENT, nd, flag, mode);
if (error)
return error;
@@ -1520,6 +1597,8 @@ ok:
exit_dput:
dput_path(&path, nd);
exit:
+ if (!IS_ERR(nd->intent.open.file))
+ release_open_intent(nd);
path_release(nd);
return error;
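The fs/namei.c hunks above add path_lookup_open() and path_lookup_create(), which pre-allocate the struct file carried in nd->intent.open, and convert open_namei() to use them together with release_open_intent() on its exit paths. The sketch below is illustrative and not part of the patch (example_lookup_for_open is an invented name); it shows the shape of a caller that performs the intent lookup and then backs out, releasing both the pre-allocated file and the path.

static int example_lookup_for_open(const char *pathname, int flag,
				   struct nameidata *nd)
{
	int error;

	/* Allocates nd->intent.open.file and performs the lookup. */
	error = path_lookup_open(pathname, lookup_flags(flag), nd, flag);
	if (error)
		return error;
	/* ... permission checks, may_open(), etc. would go here ... */

	/* Backing out: drop the pre-allocated file, then the path. */
	if (!IS_ERR(nd->intent.open.file))
		release_open_intent(nd);
	path_release(nd);
	return error;
}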
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 4a36839f0bb..44135af9894 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -142,7 +142,7 @@ static void nfs_msync_inode(struct inode *inode)
/*
* Basic procedure for returning a delegation to the server
*/
-int nfs_inode_return_delegation(struct inode *inode)
+int __nfs_inode_return_delegation(struct inode *inode)
{
struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
struct nfs_inode *nfsi = NFS_I(inode);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 3f6c45a29d6..8017846b561 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -25,7 +25,7 @@ struct nfs_delegation {
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
-int nfs_inode_return_delegation(struct inode *inode);
+int __nfs_inode_return_delegation(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle);
@@ -47,11 +47,25 @@ static inline int nfs_have_delegation(struct inode *inode, int flags)
return 1;
return 0;
}
+
+static inline int nfs_inode_return_delegation(struct inode *inode)
+{
+ int err = 0;
+
+ if (NFS_I(inode)->delegation != NULL)
+ err = __nfs_inode_return_delegation(inode);
+ return err;
+}
#else
static inline int nfs_have_delegation(struct inode *inode, int flags)
{
return 0;
}
+
+static inline int nfs_inode_return_delegation(struct inode *inode)
+{
+ return 0;
+}
#endif
#endif
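With the inline wrapper above, callers no longer test nfsi->delegation before returning a delegation, and the call compiles away entirely when NFSv4 is not configured. The sketch below is illustrative only (example_remove is an invented name); it shows the calling pattern that the fs/nfs/dir.c hunks which follow adopt in nfs_dentry_iput(), nfs_safe_remove() and nfs_rename().

static int example_remove(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode != NULL)
		nfs_inode_return_delegation(inode);	/* cheap no-op if none is held */
	/* ... NFS_PROTO(dir)->remove(dir, &dentry->d_name) ... */
	return 0;
}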
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2df639f143e..8272ed3fc70 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -532,6 +532,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
my_entry.eof = 0;
my_entry.fh = &fh;
my_entry.fattr = &fattr;
+ nfs_fattr_init(&fattr);
desc->entry = &my_entry;
while(!desc->entry->eof) {
@@ -565,8 +566,6 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
}
unlock_kernel();
- if (desc->error < 0)
- return desc->error;
if (res < 0)
return res;
return 0;
@@ -803,6 +802,7 @@ static int nfs_dentry_delete(struct dentry *dentry)
*/
static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
{
+ nfs_inode_return_delegation(inode);
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
lock_kernel();
inode->i_nlink--;
@@ -853,12 +853,6 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
dentry->d_op = NFS_PROTO(dir)->dentry_ops;
lock_kernel();
- /* Revalidate parent directory attribute cache */
- error = nfs_revalidate_inode(NFS_SERVER(dir), dir);
- if (error < 0) {
- res = ERR_PTR(error);
- goto out_unlock;
- }
/* If we're doing an exclusive create, optimize away the lookup */
if (nfs_is_exclusive_create(dir, nd))
@@ -916,7 +910,6 @@ static int is_atomic_open(struct inode *dir, struct nameidata *nd)
static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
struct dentry *res = NULL;
- struct inode *inode = NULL;
int error;
/* Check that we are indeed trying to open this file */
@@ -930,8 +923,10 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
dentry->d_op = NFS_PROTO(dir)->dentry_ops;
/* Let vfs_create() deal with O_EXCL */
- if (nd->intent.open.flags & O_EXCL)
- goto no_entry;
+ if (nd->intent.open.flags & O_EXCL) {
+ d_add(dentry, NULL);
+ goto out;
+ }
/* Open the file on the server */
lock_kernel();
@@ -945,32 +940,30 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
if (nd->intent.open.flags & O_CREAT) {
nfs_begin_data_update(dir);
- inode = nfs4_atomic_open(dir, dentry, nd);
+ res = nfs4_atomic_open(dir, dentry, nd);
nfs_end_data_update(dir);
} else
- inode = nfs4_atomic_open(dir, dentry, nd);
+ res = nfs4_atomic_open(dir, dentry, nd);
unlock_kernel();
- if (IS_ERR(inode)) {
- error = PTR_ERR(inode);
+ if (IS_ERR(res)) {
+ error = PTR_ERR(res);
switch (error) {
/* Make a negative dentry */
case -ENOENT:
- inode = NULL;
- break;
+ res = NULL;
+ goto out;
/* This turned out not to be a regular file */
+ case -EISDIR:
+ case -ENOTDIR:
+ goto no_open;
case -ELOOP:
if (!(nd->intent.open.flags & O_NOFOLLOW))
goto no_open;
- /* case -EISDIR: */
/* case -EINVAL: */
default:
- res = ERR_PTR(error);
goto out;
}
- }
-no_entry:
- res = d_add_unique(dentry, inode);
- if (res != NULL)
+ } else if (res != NULL)
dentry = res;
nfs_renew_times(dentry);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
@@ -1014,7 +1007,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
*/
lock_kernel();
verifier = nfs_save_change_attribute(dir);
- ret = nfs4_open_revalidate(dir, dentry, openflags);
+ ret = nfs4_open_revalidate(dir, dentry, openflags, nd);
if (!ret)
nfs_set_verifier(dentry, verifier);
unlock_kernel();
@@ -1137,7 +1130,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
lock_kernel();
nfs_begin_data_update(dir);
- error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
+ error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, nd);
nfs_end_data_update(dir);
if (error != 0)
goto out_err;
@@ -1332,6 +1325,7 @@ static int nfs_safe_remove(struct dentry *dentry)
nfs_begin_data_update(dir);
if (inode != NULL) {
+ nfs_inode_return_delegation(inode);
nfs_begin_data_update(inode);
error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
/* The VFS may want to delete this inode */
@@ -1438,17 +1432,14 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
dentry->d_parent->d_name.name, dentry->d_name.name);
- /*
- * Drop the dentry in advance to force a new lookup.
- * Since nfs_proc_link doesn't return a file handle,
- * we can't use the existing dentry.
- */
lock_kernel();
- d_drop(dentry);
-
nfs_begin_data_update(dir);
nfs_begin_data_update(inode);
error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
+ if (error == 0) {
+ atomic_inc(&inode->i_count);
+ d_instantiate(dentry, inode);
+ }
nfs_end_data_update(inode);
nfs_end_data_update(dir);
unlock_kernel();
@@ -1512,9 +1503,11 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
if (!new_inode)
goto go_ahead;
- if (S_ISDIR(new_inode->i_mode))
- goto out;
- else if (atomic_read(&new_dentry->d_count) > 2) {
+ if (S_ISDIR(new_inode->i_mode)) {
+ error = -EISDIR;
+ if (!S_ISDIR(old_inode->i_mode))
+ goto out;
+ } else if (atomic_read(&new_dentry->d_count) > 2) {
int err;
/* copy the target dentry's name */
dentry = d_alloc(new_dentry->d_parent,
@@ -1539,7 +1532,8 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
#endif
goto out;
}
- }
+ } else
+ new_inode->i_nlink--;
go_ahead:
/*
@@ -1549,6 +1543,7 @@ go_ahead:
nfs_wb_all(old_inode);
shrink_dcache_parent(old_dentry);
}
+ nfs_inode_return_delegation(old_inode);
if (new_inode)
d_delete(new_dentry);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6bdcfa95de9..57d3e77d97e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -205,8 +205,8 @@ nfs_file_flush(struct file *file)
if (!status) {
status = ctx->error;
ctx->error = 0;
- if (!status && !nfs_have_delegation(inode, FMODE_READ))
- __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (!status)
+ nfs_revalidate_inode(NFS_SERVER(inode), inode);
}
unlock_kernel();
return status;
@@ -376,22 +376,31 @@ out_swapfile:
static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
+ struct file_lock *cfl;
struct inode *inode = filp->f_mapping->host;
int status = 0;
lock_kernel();
- /* Use local locking if mounted with "-onolock" */
- if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
- status = NFS_PROTO(inode)->lock(filp, cmd, fl);
- else {
- struct file_lock *cfl = posix_test_lock(filp, fl);
-
- fl->fl_type = F_UNLCK;
- if (cfl != NULL)
- memcpy(fl, cfl, sizeof(*fl));
+ /* Try local locking first */
+ cfl = posix_test_lock(filp, fl);
+ if (cfl != NULL) {
+ locks_copy_lock(fl, cfl);
+ goto out;
}
+
+ if (nfs_have_delegation(inode, FMODE_READ))
+ goto out_noconflict;
+
+ if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)
+ goto out_noconflict;
+
+ status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+out:
unlock_kernel();
return status;
+out_noconflict:
+ fl->fl_type = F_UNLCK;
+ goto out;
}
static int do_vfs_lock(struct file *file, struct file_lock *fl)
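The do_getlk() rewrite above changes the order of the checks: a conflicting local lock is reported immediately; a held read delegation or an -onolock mount means the server cannot hold a conflicting lock, so no RPC is needed; only otherwise is the lock test sent to the server. The standalone model below is not part of the patch and uses invented names; it captures just that decision logic.

#include <stdio.h>

enum getlk_path { LOCAL_CONFLICT, NO_CONFLICT, ASK_SERVER };

static enum getlk_path getlk_path(int local_conflict, int have_read_delegation,
				  int mount_nolock)
{
	if (local_conflict)
		return LOCAL_CONFLICT;		/* copy the conflicting lock, done */
	if (have_read_delegation || mount_nolock)
		return NO_CONFLICT;		/* report F_UNLCK without an RPC */
	return ASK_SERVER;			/* NFS_PROTO(inode)->lock(...) */
}

int main(void)
{
	printf("%d %d %d\n",
	       getlk_path(1, 0, 0),		/* 0: local conflict wins */
	       getlk_path(0, 1, 0),		/* 1: read delegation short-circuits */
	       getlk_path(0, 0, 0));		/* 2: fall through to the server */
	return 0;
}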
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d4eadeea128..f2781ca4276 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -358,6 +358,35 @@ out_no_root:
return no_root_error;
}
+static void nfs_init_timeout_values(struct rpc_timeout *to, int proto, unsigned int timeo, unsigned int retrans)
+{
+ to->to_initval = timeo * HZ / 10;
+ to->to_retries = retrans;
+ if (!to->to_retries)
+ to->to_retries = 2;
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ if (!to->to_initval)
+ to->to_initval = 60 * HZ;
+ if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
+ to->to_initval = NFS_MAX_TCP_TIMEOUT;
+ to->to_increment = to->to_initval;
+ to->to_maxval = to->to_initval + (to->to_increment * to->to_retries);
+ to->to_exponential = 0;
+ break;
+ case IPPROTO_UDP:
+ default:
+ if (!to->to_initval)
+ to->to_initval = 11 * HZ / 10;
+ if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
+ to->to_initval = NFS_MAX_UDP_TIMEOUT;
+ to->to_maxval = NFS_MAX_UDP_TIMEOUT;
+ to->to_exponential = 1;
+ break;
+ }
+}
+
/*
* Create an RPC client handle.
*/
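nfs_init_timeout_values(), added in the hunk above, replaces the two open-coded timeout setups in nfs_create_client() and nfs4_fill_super(). For a typical tcp,timeo=600,retrans=2 mount the formula gives a 60 second initial timeout, linear 60 second increments and a 180 second maximum. The standalone model below reproduces only that arithmetic; HZ=1000, the NFS_MAX_TCP_TIMEOUT value and every name in it are assumptions made for the illustration.

#include <stdio.h>

#define HZ			1000			/* assumed for the model */
#define NFS_MAX_TCP_TIMEOUT	(600 * HZ)		/* assumed cap, not taken from the diff */

struct timeouts {
	unsigned long initval, increment, maxval, retries;
};

static void init_tcp_timeouts(struct timeouts *to, unsigned int timeo, unsigned int retrans)
{
	to->initval = timeo * HZ / 10;			/* timeo is in tenths of a second */
	to->retries = retrans ? retrans : 2;
	if (!to->initval)
		to->initval = 60 * HZ;
	if (to->initval > NFS_MAX_TCP_TIMEOUT)
		to->initval = NFS_MAX_TCP_TIMEOUT;
	to->increment = to->initval;			/* TCP backs off linearly */
	to->maxval = to->initval + to->increment * to->retries;
}

int main(void)
{
	struct timeouts to;

	init_tcp_timeouts(&to, 600, 2);
	/* Expect: initval 60s, increment 60s, maxval 180s, 2 retries. */
	printf("initval=%lus increment=%lus maxval=%lus retries=%lu\n",
	       to.initval / HZ, to.increment / HZ, to.maxval / HZ, to.retries);
	return 0;
}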
@@ -367,22 +396,12 @@ nfs_create_client(struct nfs_server *server, const struct nfs_mount_data *data)
struct rpc_timeout timeparms;
struct rpc_xprt *xprt = NULL;
struct rpc_clnt *clnt = NULL;
- int tcp = (data->flags & NFS_MOUNT_TCP);
-
- /* Initialize timeout values */
- timeparms.to_initval = data->timeo * HZ / 10;
- timeparms.to_retries = data->retrans;
- timeparms.to_maxval = tcp ? RPC_MAX_TCP_TIMEOUT : RPC_MAX_UDP_TIMEOUT;
- timeparms.to_exponential = 1;
+ int proto = (data->flags & NFS_MOUNT_TCP) ? IPPROTO_TCP : IPPROTO_UDP;
- if (!timeparms.to_initval)
- timeparms.to_initval = (tcp ? 600 : 11) * HZ / 10;
- if (!timeparms.to_retries)
- timeparms.to_retries = 5;
+ nfs_init_timeout_values(&timeparms, proto, data->timeo, data->retrans);
/* create transport and client */
- xprt = xprt_create_proto(tcp ? IPPROTO_TCP : IPPROTO_UDP,
- &server->addr, &timeparms);
+ xprt = xprt_create_proto(proto, &server->addr, &timeparms);
if (IS_ERR(xprt)) {
dprintk("%s: cannot create RPC transport. Error = %ld\n",
__FUNCTION__, PTR_ERR(xprt));
@@ -576,7 +595,6 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
{ NFS_MOUNT_SOFT, ",soft", ",hard" },
{ NFS_MOUNT_INTR, ",intr", "" },
{ NFS_MOUNT_POSIX, ",posix", "" },
- { NFS_MOUNT_TCP, ",tcp", ",udp" },
{ NFS_MOUNT_NOCTO, ",nocto", "" },
{ NFS_MOUNT_NOAC, ",noac", "" },
{ NFS_MOUNT_NONLM, ",nolock", ",lock" },
@@ -585,6 +603,8 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
};
struct proc_nfs_info *nfs_infop;
struct nfs_server *nfss = NFS_SB(mnt->mnt_sb);
+ char buf[12];
+ char *proto;
seq_printf(m, ",v%d", nfss->rpc_ops->version);
seq_printf(m, ",rsize=%d", nfss->rsize);
@@ -603,6 +623,18 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
else
seq_puts(m, nfs_infop->nostr);
}
+ switch (nfss->client->cl_xprt->prot) {
+ case IPPROTO_TCP:
+ proto = "tcp";
+ break;
+ case IPPROTO_UDP:
+ proto = "udp";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "%u", nfss->client->cl_xprt->prot);
+ proto = buf;
+ }
+ seq_printf(m, ",proto=%s", proto);
seq_puts(m, ",addr=");
seq_escape(m, nfss->hostname, " \t\n\\");
return 0;
@@ -753,7 +785,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
else
init_special_inode(inode, inode->i_mode, fattr->rdev);
- nfsi->read_cache_jiffies = fattr->timestamp;
+ nfsi->read_cache_jiffies = fattr->time_start;
+ nfsi->last_updated = jiffies;
inode->i_atime = fattr->atime;
inode->i_mtime = fattr->mtime;
inode->i_ctime = fattr->ctime;
@@ -821,6 +854,11 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
filemap_fdatawait(inode->i_mapping);
nfs_wb_all(inode);
}
+ /*
+ * Return any delegations if we're going to change ACLs
+ */
+ if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
+ nfs_inode_return_delegation(inode);
error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
if (error == 0)
nfs_refresh_inode(inode, &fattr);
@@ -1019,15 +1057,11 @@ int nfs_open(struct inode *inode, struct file *filp)
ctx->mode = filp->f_mode;
nfs_file_set_open_context(filp, ctx);
put_nfs_open_context(ctx);
- if ((filp->f_mode & FMODE_WRITE) != 0)
- nfs_begin_data_update(inode);
return 0;
}
int nfs_release(struct inode *inode, struct file *filp)
{
- if ((filp->f_mode & FMODE_WRITE) != 0)
- nfs_end_data_update(inode);
nfs_file_clear_open_context(filp);
return 0;
}
@@ -1083,14 +1117,15 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
goto out;
}
+ spin_lock(&inode->i_lock);
status = nfs_update_inode(inode, &fattr, verifier);
if (status) {
+ spin_unlock(&inode->i_lock);
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",
inode->i_sb->s_id,
(long long)NFS_FILEID(inode), status);
goto out;
}
- spin_lock(&inode->i_lock);
cache_validity = nfsi->cache_validity;
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
@@ -1098,7 +1133,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
* We may need to keep the attributes marked as invalid if
* we raced with nfs_end_data_update().
*/
- if (verifier == nfsi->cache_change_attribute)
+ if (time_after_eq(verifier, nfsi->cache_change_attribute))
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
spin_unlock(&inode->i_lock);
@@ -1165,7 +1200,7 @@ void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
if (S_ISDIR(inode->i_mode)) {
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
/* This ensures we revalidate child dentries */
- nfsi->cache_change_attribute++;
+ nfsi->cache_change_attribute = jiffies;
}
spin_unlock(&inode->i_lock);
@@ -1197,20 +1232,19 @@ void nfs_end_data_update(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
if (!nfs_have_delegation(inode, FMODE_READ)) {
- /* Mark the attribute cache for revalidation */
- spin_lock(&inode->i_lock);
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
- /* Directories and symlinks: invalidate page cache too */
- if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ /* Directories and symlinks: invalidate page cache */
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+ spin_lock(&inode->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&inode->i_lock);
+ }
}
- nfsi->cache_change_attribute ++;
+ nfsi->cache_change_attribute = jiffies;
atomic_dec(&nfsi->data_updates);
}
/**
- * nfs_refresh_inode - verify consistency of the inode attribute cache
+ * nfs_check_inode_attributes - verify consistency of the inode attribute cache
* @inode - pointer to inode
* @fattr - updated attributes
*
@@ -1218,13 +1252,12 @@ void nfs_end_data_update(struct inode *inode)
* so that fattr carries weak cache consistency data, then it may
* also update the ctime/mtime/change_attribute.
*/
-int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
+static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fattr)
{
struct nfs_inode *nfsi = NFS_I(inode);
loff_t cur_size, new_isize;
int data_unstable;
- spin_lock(&inode->i_lock);
/* Are we in the process of updating data on the server? */
data_unstable = nfs_caches_unstable(inode);
@@ -1288,11 +1321,67 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
if (!timespec_equal(&inode->i_atime, &fattr->atime))
nfsi->cache_validity |= NFS_INO_INVALID_ATIME;
- nfsi->read_cache_jiffies = fattr->timestamp;
- spin_unlock(&inode->i_lock);
+ nfsi->read_cache_jiffies = fattr->time_start;
return 0;
}
+/**
+ * nfs_refresh_inode - try to update the inode attribute cache
+ * @inode - pointer to inode
+ * @fattr - updated attributes
+ *
+ * Check that an RPC call that returned attributes has not overlapped with
+ * other recent updates of the inode metadata, then decide whether it is
+ * safe to do a full update of the inode attributes, or whether just to
+ * call nfs_check_inode_attributes.
+ */
+int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ int status;
+
+ if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+ return 0;
+ spin_lock(&inode->i_lock);
+ nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
+ if (nfs_verify_change_attribute(inode, fattr->time_start))
+ nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
+ if (time_after(fattr->time_start, nfsi->last_updated))
+ status = nfs_update_inode(inode, fattr, fattr->time_start);
+ else
+ status = nfs_check_inode_attributes(inode, fattr);
+
+ spin_unlock(&inode->i_lock);
+ return status;
+}
+
+/**
+ * nfs_post_op_update_inode - try to update the inode attribute cache
+ * @inode - pointer to inode
+ * @fattr - updated attributes
+ *
+ * After an operation that has changed the inode metadata, mark the
+ * attribute cache as being invalid, then try to update it.
+ */
+int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ int status = 0;
+
+ spin_lock(&inode->i_lock);
+ if (unlikely((fattr->valid & NFS_ATTR_FATTR) == 0)) {
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
+ goto out;
+ }
+ status = nfs_update_inode(inode, fattr, fattr->time_start);
+ if (time_after_eq(fattr->time_start, nfsi->cache_change_attribute))
+ nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME|NFS_INO_REVAL_PAGECACHE);
+ nfsi->cache_change_attribute = jiffies;
+out:
+ spin_unlock(&inode->i_lock);
+ return status;
+}
+
/*
* Many nfs protocol calls return the new file attributes after
* an operation. Here we update the inode to reflect the state
@@ -1328,20 +1417,17 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
goto out_err;
}
- spin_lock(&inode->i_lock);
-
/*
* Make sure the inode's type hasn't changed.
*/
- if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
- spin_unlock(&inode->i_lock);
+ if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
goto out_changed;
- }
/*
* Update the read time so we don't revalidate too often.
*/
- nfsi->read_cache_jiffies = fattr->timestamp;
+ nfsi->read_cache_jiffies = fattr->time_start;
+ nfsi->last_updated = jiffies;
/* Are we racing with known updates of the metadata on the server? */
data_unstable = ! (nfs_verify_change_attribute(inode, verifier) ||
@@ -1354,7 +1440,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
/* Do we perhaps have any outstanding writes? */
if (nfsi->npages == 0) {
/* No, but did we race with nfs_end_data_update()? */
- if (verifier == nfsi->cache_change_attribute) {
+ if (time_after_eq(verifier, nfsi->cache_change_attribute)) {
inode->i_size = new_isize;
invalid |= NFS_INO_INVALID_DATA;
}
@@ -1430,7 +1516,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
if (!nfs_have_delegation(inode, FMODE_READ))
nfsi->cache_validity |= invalid;
- spin_unlock(&inode->i_lock);
return 0;
out_changed:
/*
@@ -1639,8 +1724,7 @@ static void nfs4_clear_inode(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
/* If we are holding a delegation, return it! */
- if (nfsi->delegation != NULL)
- nfs_inode_return_delegation(inode);
+ nfs_inode_return_delegation(inode);
/* First call standard NFS clear_inode() code */
nfs_clear_inode(inode);
/* Now clear out any remaining state */
@@ -1669,7 +1753,7 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data,
struct rpc_clnt *clnt = NULL;
struct rpc_timeout timeparms;
rpc_authflavor_t authflavour;
- int proto, err = -EIO;
+ int err = -EIO;
sb->s_blocksize_bits = 0;
sb->s_blocksize = 0;
@@ -1687,30 +1771,8 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data,
server->acdirmax = data->acdirmax*HZ;
server->rpc_ops = &nfs_v4_clientops;
- /* Initialize timeout values */
-
- timeparms.to_initval = data->timeo * HZ / 10;
- timeparms.to_retries = data->retrans;
- timeparms.to_exponential = 1;
- if (!timeparms.to_retries)
- timeparms.to_retries = 5;
- proto = data->proto;
- /* Which IP protocol do we use? */
- switch (proto) {
- case IPPROTO_TCP:
- timeparms.to_maxval = RPC_MAX_TCP_TIMEOUT;
- if (!timeparms.to_initval)
- timeparms.to_initval = 600 * HZ / 10;
- break;
- case IPPROTO_UDP:
- timeparms.to_maxval = RPC_MAX_UDP_TIMEOUT;
- if (!timeparms.to_initval)
- timeparms.to_initval = 11 * HZ / 10;
- break;
- default:
- return -EINVAL;
- }
+ nfs_init_timeout_values(&timeparms, data->proto, data->timeo, data->retrans);
clp = nfs4_get_client(&server->addr.sin_addr);
if (!clp) {
@@ -1735,7 +1797,7 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data,
down_write(&clp->cl_sem);
if (IS_ERR(clp->cl_rpcclient)) {
- xprt = xprt_create_proto(proto, &server->addr, &timeparms);
+ xprt = xprt_create_proto(data->proto, &server->addr, &timeparms);
if (IS_ERR(xprt)) {
up_write(&clp->cl_sem);
err = PTR_ERR(xprt);
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index d91b69044a4..59049e864ca 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -143,7 +143,6 @@ xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr)
fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
fattr->rdev = 0;
}
- fattr->timestamp = jiffies;
return p;
}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index edc95514046..92c870d19cc 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -78,7 +78,7 @@ nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("%s: call fsinfo\n", __FUNCTION__);
- info->fattr->valid = 0;
+ nfs_fattr_init(info->fattr);
status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0);
dprintk("%s: reply fsinfo: %d\n", __FUNCTION__, status);
if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
@@ -98,7 +98,7 @@ nfs3_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("NFS call getattr\n");
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call(server->client, NFS3PROC_GETATTR,
fhandle, fattr, 0);
dprintk("NFS reply getattr: %d\n", status);
@@ -117,7 +117,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
int status;
dprintk("NFS call setattr\n");
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call(NFS_CLIENT(inode), NFS3PROC_SETATTR, &arg, fattr, 0);
if (status == 0)
nfs_setattr_update_inode(inode, sattr);
@@ -143,8 +143,8 @@ nfs3_proc_lookup(struct inode *dir, struct qstr *name,
int status;
dprintk("NFS call lookup %s\n", name->name);
- dir_attr.valid = 0;
- fattr->valid = 0;
+ nfs_fattr_init(&dir_attr);
+ nfs_fattr_init(fattr);
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_LOOKUP, &arg, &res, 0);
if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR))
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_GETATTR,
@@ -174,7 +174,6 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
int status;
dprintk("NFS call access\n");
- fattr.valid = 0;
if (mode & MAY_READ)
arg.access |= NFS3_ACCESS_READ;
@@ -189,6 +188,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
if (mode & MAY_EXEC)
arg.access |= NFS3_ACCESS_EXECUTE;
}
+ nfs_fattr_init(&fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
nfs_refresh_inode(inode, &fattr);
if (status == 0) {
@@ -217,7 +217,7 @@ static int nfs3_proc_readlink(struct inode *inode, struct page *page,
int status;
dprintk("NFS call readlink\n");
- fattr.valid = 0;
+ nfs_fattr_init(&fattr);
status = rpc_call(NFS_CLIENT(inode), NFS3PROC_READLINK,
&args, &fattr, 0);
nfs_refresh_inode(inode, &fattr);
@@ -240,7 +240,7 @@ static int nfs3_proc_read(struct nfs_read_data *rdata)
dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
(long long) rdata->args.offset);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
if (status >= 0)
nfs_refresh_inode(inode, fattr);
@@ -263,10 +263,10 @@ static int nfs3_proc_write(struct nfs_write_data *wdata)
dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
(long long) wdata->args.offset);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags);
if (status >= 0)
- nfs_refresh_inode(inode, fattr);
+ nfs_post_op_update_inode(inode, fattr);
dprintk("NFS reply write: %d\n", status);
return status < 0? status : wdata->res.count;
}
@@ -285,10 +285,10 @@ static int nfs3_proc_commit(struct nfs_write_data *cdata)
dprintk("NFS call commit %d @ %Ld\n", cdata->args.count,
(long long) cdata->args.offset);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
if (status >= 0)
- nfs_refresh_inode(inode, fattr);
+ nfs_post_op_update_inode(inode, fattr);
dprintk("NFS reply commit: %d\n", status);
return status;
}
@@ -299,7 +299,7 @@ static int nfs3_proc_commit(struct nfs_write_data *cdata)
*/
static int
nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags)
+ int flags, struct nameidata *nd)
{
struct nfs_fh fhandle;
struct nfs_fattr fattr;
@@ -329,10 +329,10 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
sattr->ia_mode &= ~current->fs->umask;
again:
- dir_attr.valid = 0;
- fattr.valid = 0;
+ nfs_fattr_init(&dir_attr);
+ nfs_fattr_init(&fattr);
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_CREATE, &arg, &res, 0);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, &dir_attr);
/* If the server doesn't support the exclusive creation semantics,
* try again with simple 'guarded' mode. */
@@ -401,9 +401,9 @@ nfs3_proc_remove(struct inode *dir, struct qstr *name)
int status;
dprintk("NFS call remove %s\n", name->name);
- dir_attr.valid = 0;
+ nfs_fattr_init(&dir_attr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, &dir_attr);
dprintk("NFS reply remove: %d\n", status);
return status;
}
@@ -422,7 +422,7 @@ nfs3_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr
ptr->arg.fh = NFS_FH(dir->d_inode);
ptr->arg.name = name->name;
ptr->arg.len = name->len;
- ptr->res.valid = 0;
+ nfs_fattr_init(&ptr->res);
msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE];
msg->rpc_argp = &ptr->arg;
msg->rpc_resp = &ptr->res;
@@ -439,7 +439,7 @@ nfs3_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
return 1;
if (msg->rpc_argp) {
dir_attr = (struct nfs_fattr*)msg->rpc_resp;
- nfs_refresh_inode(dir->d_inode, dir_attr);
+ nfs_post_op_update_inode(dir->d_inode, dir_attr);
kfree(msg->rpc_argp);
}
return 0;
@@ -465,11 +465,11 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
int status;
dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
- old_dir_attr.valid = 0;
- new_dir_attr.valid = 0;
+ nfs_fattr_init(&old_dir_attr);
+ nfs_fattr_init(&new_dir_attr);
status = rpc_call(NFS_CLIENT(old_dir), NFS3PROC_RENAME, &arg, &res, 0);
- nfs_refresh_inode(old_dir, &old_dir_attr);
- nfs_refresh_inode(new_dir, &new_dir_attr);
+ nfs_post_op_update_inode(old_dir, &old_dir_attr);
+ nfs_post_op_update_inode(new_dir, &new_dir_attr);
dprintk("NFS reply rename: %d\n", status);
return status;
}
@@ -491,11 +491,11 @@ nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
int status;
dprintk("NFS call link %s\n", name->name);
- dir_attr.valid = 0;
- fattr.valid = 0;
+ nfs_fattr_init(&dir_attr);
+ nfs_fattr_init(&fattr);
status = rpc_call(NFS_CLIENT(inode), NFS3PROC_LINK, &arg, &res, 0);
- nfs_refresh_inode(dir, &dir_attr);
- nfs_refresh_inode(inode, &fattr);
+ nfs_post_op_update_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(inode, &fattr);
dprintk("NFS reply link: %d\n", status);
return status;
}
@@ -524,10 +524,10 @@ nfs3_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
if (path->len > NFS3_MAXPATHLEN)
return -ENAMETOOLONG;
dprintk("NFS call symlink %s -> %s\n", name->name, path->name);
- dir_attr.valid = 0;
- fattr->valid = 0;
+ nfs_fattr_init(&dir_attr);
+ nfs_fattr_init(fattr);
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_SYMLINK, &arg, &res, 0);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, &dir_attr);
dprintk("NFS reply symlink: %d\n", status);
return status;
}
@@ -552,13 +552,13 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
int status;
dprintk("NFS call mkdir %s\n", dentry->d_name.name);
- dir_attr.valid = 0;
- fattr.valid = 0;
sattr->ia_mode &= ~current->fs->umask;
+ nfs_fattr_init(&dir_attr);
+ nfs_fattr_init(&fattr);
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKDIR, &arg, &res, 0);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, &dir_attr);
if (status != 0)
goto out;
status = nfs_instantiate(dentry, &fhandle, &fattr);
@@ -582,9 +582,9 @@ nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
int status;
dprintk("NFS call rmdir %s\n", name->name);
- dir_attr.valid = 0;
+ nfs_fattr_init(&dir_attr);
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_RMDIR, &arg, &dir_attr, 0);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, &dir_attr);
dprintk("NFS reply rmdir: %d\n", status);
return status;
}
@@ -634,7 +634,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
dprintk("NFS call readdir%s %d\n",
plus? "plus" : "", (unsigned int) cookie);
- dir_attr.valid = 0;
+ nfs_fattr_init(&dir_attr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_refresh_inode(dir, &dir_attr);
dprintk("NFS reply readdir: %d\n", status);
@@ -676,10 +676,10 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
sattr->ia_mode &= ~current->fs->umask;
- dir_attr.valid = 0;
- fattr.valid = 0;
+ nfs_fattr_init(&dir_attr);
+ nfs_fattr_init(&fattr);
status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKNOD, &arg, &res, 0);
- nfs_refresh_inode(dir, &dir_attr);
+ nfs_post_op_update_inode(dir, &dir_attr);
if (status != 0)
goto out;
status = nfs_instantiate(dentry, &fh, &fattr);
@@ -698,7 +698,7 @@ nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("NFS call fsstat\n");
- stat->fattr->valid = 0;
+ nfs_fattr_init(stat->fattr);
status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, stat, 0);
dprintk("NFS reply statfs: %d\n", status);
return status;
@@ -711,7 +711,7 @@ nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("NFS call fsinfo\n");
- info->fattr->valid = 0;
+ nfs_fattr_init(info->fattr);
status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0);
dprintk("NFS reply fsinfo: %d\n", status);
return status;
@@ -724,7 +724,7 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("NFS call pathconf\n");
- info->fattr->valid = 0;
+ nfs_fattr_init(info->fattr);
status = rpc_call(server->client, NFS3PROC_PATHCONF, fhandle, info, 0);
dprintk("NFS reply pathconf: %d\n", status);
return status;
@@ -735,7 +735,7 @@ extern u32 *nfs3_decode_dirent(u32 *, struct nfs_entry *, int);
static void
nfs3_read_done(struct rpc_task *task)
{
- struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+ struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
if (nfs3_async_handle_jukebox(task))
return;
@@ -775,7 +775,7 @@ nfs3_write_done(struct rpc_task *task)
return;
data = (struct nfs_write_data *)task->tk_calldata;
if (task->tk_status >= 0)
- nfs_refresh_inode(data->inode, data->res.fattr);
+ nfs_post_op_update_inode(data->inode, data->res.fattr);
nfs_writeback_done(task);
}
@@ -819,7 +819,7 @@ nfs3_commit_done(struct rpc_task *task)
return;
data = (struct nfs_write_data *)task->tk_calldata;
if (task->tk_status >= 0)
- nfs_refresh_inode(data->inode, data->res.fattr);
+ nfs_post_op_update_inode(data->inode, data->res.fattr);
nfs_commit_done(task);
}
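The nfs3proc.c hunks above converge on one calling convention: initialise every nfs_fattr with nfs_fattr_init() before issuing the RPC, then hand the reply to nfs_post_op_update_inode() when the operation changed the file or directory, and to nfs_refresh_inode() when it only read attributes. The sketch below is illustrative, not part of the patch; example_post_op is an invented name, and the claim that nfs_fattr_init() clears ->valid and records a start timestamp is an inference from how the other hunks use fattr->time_start.

static int example_post_op(struct inode *inode, struct rpc_message *msg,
			   struct nfs_fattr *fattr)
{
	int status;

	nfs_fattr_init(fattr);				/* presumably clears ->valid and stamps ->time_start */
	status = rpc_call_sync(NFS_CLIENT(inode), msg, 0);
	nfs_post_op_update_inode(inode, fattr);		/* the operation changed attributes on the server */
	return status;
}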
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index db4a904810a..0498bd36602 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -174,7 +174,6 @@ xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr)
/* Update the mode bits */
fattr->valid |= (NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3);
- fattr->timestamp = jiffies;
return p;
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index ec1a22d7b87..78a53f5a9f1 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -93,25 +93,50 @@ struct nfs4_client {
};
/*
+ * struct rpc_sequence ensures that RPC calls are sent in the exact
+ * order that they appear on the list.
+ */
+struct rpc_sequence {
+ struct rpc_wait_queue wait; /* RPC call delay queue */
+ spinlock_t lock; /* Protects the list */
+ struct list_head list; /* Defines sequence of RPC calls */
+};
+
+#define NFS_SEQID_CONFIRMED 1
+struct nfs_seqid_counter {
+ struct rpc_sequence *sequence;
+ int flags;
+ u32 counter;
+};
+
+struct nfs_seqid {
+ struct nfs_seqid_counter *sequence;
+ struct list_head list;
+};
+
+static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status)
+{
+ if (seqid_mutating_err(-status))
+ seqid->flags |= NFS_SEQID_CONFIRMED;
+}
+
+/*
* NFS4 state_owners and lock_owners are simply labels for ordered
* sequences of RPC calls. Their sole purpose is to provide once-only
* semantics by allowing the server to identify replayed requests.
- *
- * The ->so_sema is held during all state_owner seqid-mutating operations:
- * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize
- * so_seqid.
*/
struct nfs4_state_owner {
+ spinlock_t so_lock;
struct list_head so_list; /* per-clientid list of state_owners */
struct nfs4_client *so_client;
u32 so_id; /* 32-bit identifier, unique */
- struct semaphore so_sema;
- u32 so_seqid; /* protected by so_sema */
atomic_t so_count;
struct rpc_cred *so_cred; /* Associated cred */
struct list_head so_states;
struct list_head so_delegations;
+ struct nfs_seqid_counter so_seqid;
+ struct rpc_sequence so_sequence;
};
/*
@@ -132,7 +157,7 @@ struct nfs4_lock_state {
fl_owner_t ls_owner; /* POSIX lock owner */
#define NFS_LOCK_INITIALIZED 1
int ls_flags;
- u32 ls_seqid;
+ struct nfs_seqid_counter ls_seqid;
u32 ls_id;
nfs4_stateid ls_stateid;
atomic_t ls_count;
@@ -153,7 +178,6 @@ struct nfs4_state {
struct inode *inode; /* Pointer to the inode */
unsigned long flags; /* Do we hold any locks? */
- struct semaphore lock_sema; /* Serializes file locking operations */
spinlock_t state_lock; /* Protects the lock_states list */
nfs4_stateid stateid;
@@ -191,8 +215,8 @@ extern int nfs4_proc_setclientid_confirm(struct nfs4_client *);
extern int nfs4_proc_async_renew(struct nfs4_client *);
extern int nfs4_proc_renew(struct nfs4_client *);
extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode);
-extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
-extern int nfs4_open_revalidate(struct inode *, struct dentry *, int);
+extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
+extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops;
@@ -224,12 +248,17 @@ extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, mode_t);
extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
-extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
extern void nfs4_schedule_state_recovery(struct nfs4_client *);
+extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
+extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter);
+extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
+extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_free_seqid(struct nfs_seqid *seqid);
+
extern const nfs4_stateid zero_stateid;
/* nfs4xdr.c */
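The nfs4_fs.h hunk above replaces the so_sema semaphore and the raw so_seqid counter with struct nfs_seqid objects drawn from an nfs_seqid_counter. The sketch below is illustrative and not part of the patch (example_open_reclaim is an invented name and error handling is trimmed); it shows the lifecycle that the nfs4proc.c hunks which follow apply to every seqid-mutating call: allocate, issue the RPC, confirm and increment the counter according to the status, then free.

static int example_open_reclaim(struct nfs4_state_owner *sp, struct rpc_clnt *clnt,
				struct rpc_message *msg, struct nfs_openargs *o_arg)
{
	int status;

	o_arg->seqid = nfs_alloc_seqid(&sp->so_seqid);
	if (o_arg->seqid == NULL)
		return -ENOMEM;
	status = rpc_call_sync(clnt, msg, RPC_TASK_NOINTR);
	nfs_confirm_seqid(&sp->so_seqid, status);	/* mark the sequence as established */
	nfs_increment_open_seqid(status, o_arg->seqid);
	nfs_free_seqid(o_arg->seqid);
	return status;
}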
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9701ca8c942..933e13b383f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -47,6 +47,7 @@
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>
#include <linux/namei.h>
+#include <linux/mount.h>
#include "nfs4_fs.h"
#include "delegation.h"
@@ -56,10 +57,11 @@
#define NFS4_POLL_RETRY_MIN (1*HZ)
#define NFS4_POLL_RETRY_MAX (15*HZ)
+static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
-static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *);
+static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *);
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry);
-static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
+static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
extern struct rpc_procinfo nfs4_procedures[];
@@ -185,8 +187,26 @@ static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinf
{
struct nfs_inode *nfsi = NFS_I(inode);
+ spin_lock(&inode->i_lock);
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (cinfo->before == nfsi->change_attr && cinfo->atomic)
nfsi->change_attr = cinfo->after;
+ spin_unlock(&inode->i_lock);
+}
+
+/* Helper for asynchronous RPC calls */
+static int nfs4_call_async(struct rpc_clnt *clnt, rpc_action tk_begin,
+ rpc_action tk_exit, void *calldata)
+{
+ struct rpc_task *task;
+
+ if (!(task = rpc_new_task(clnt, tk_exit, RPC_TASK_ASYNC)))
+ return -ENOMEM;
+
+ task->tk_calldata = calldata;
+ task->tk_action = tk_begin;
+ rpc_execute(task);
+ return 0;
}
static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags)
@@ -195,6 +215,7 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid,
open_flags &= (FMODE_READ|FMODE_WRITE);
/* Protect against nfs4_find_state() */
+ spin_lock(&state->owner->so_lock);
spin_lock(&inode->i_lock);
state->state |= open_flags;
/* NB! List reordering - see the reclaim code for why. */
@@ -204,12 +225,12 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid,
state->nreaders++;
memcpy(&state->stateid, stateid, sizeof(state->stateid));
spin_unlock(&inode->i_lock);
+ spin_unlock(&state->owner->so_lock);
}
/*
* OPEN_RECLAIM:
* reclaim state on the server after a reboot.
- * Assumes caller is holding the sp->so_sem
*/
static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
@@ -218,7 +239,6 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st
struct nfs_delegation *delegation = NFS_I(inode)->delegation;
struct nfs_openargs o_arg = {
.fh = NFS_FH(inode),
- .seqid = sp->so_seqid,
.id = sp->so_id,
.open_flags = state->state,
.clientid = server->nfs4_state->cl_clientid,
@@ -245,8 +265,13 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st
}
o_arg.u.delegation_type = delegation->type;
}
+ o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+ if (o_arg.seqid == NULL)
+ return -ENOMEM;
status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- nfs4_increment_seqid(status, sp);
+ /* Confirm the sequence as being established */
+ nfs_confirm_seqid(&sp->so_seqid, status);
+ nfs_increment_open_seqid(status, o_arg.seqid);
if (status == 0) {
memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
if (o_res.delegation_type != 0) {
@@ -256,6 +281,7 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st
nfs_async_inode_return_delegation(inode, &o_res.stateid);
}
}
+ nfs_free_seqid(o_arg.seqid);
clear_bit(NFS_DELEGATED_STATE, &state->flags);
/* Ensure we update the inode attributes */
NFS_CACHEINV(inode);
@@ -302,23 +328,35 @@ static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state
};
int status = 0;
- down(&sp->so_sema);
if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
goto out;
if (state->state == 0)
goto out;
- arg.seqid = sp->so_seqid;
+ arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+ status = -ENOMEM;
+ if (arg.seqid == NULL)
+ goto out;
arg.open_flags = state->state;
memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data));
status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- nfs4_increment_seqid(status, sp);
+ nfs_increment_open_seqid(status, arg.seqid);
+ if (status != 0)
+ goto out_free;
+ if(res.rflags & NFS4_OPEN_RESULT_CONFIRM) {
+ status = _nfs4_proc_open_confirm(server->client, NFS_FH(inode),
+ sp, &res.stateid, arg.seqid);
+ if (status != 0)
+ goto out_free;
+ }
+ nfs_confirm_seqid(&sp->so_seqid, 0);
if (status >= 0) {
memcpy(state->stateid.data, res.stateid.data,
sizeof(state->stateid.data));
clear_bit(NFS_DELEGATED_STATE, &state->flags);
}
+out_free:
+ nfs_free_seqid(arg.seqid);
out:
- up(&sp->so_sema);
dput(parent);
return status;
}
@@ -345,11 +383,11 @@ int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
return err;
}
-static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid)
+static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid)
{
struct nfs_open_confirmargs arg = {
.fh = fh,
- .seqid = sp->so_seqid,
+ .seqid = seqid,
.stateid = *stateid,
};
struct nfs_open_confirmres res;
@@ -362,7 +400,9 @@ static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nf
int status;
status = rpc_call_sync(clnt, &msg, RPC_TASK_NOINTR);
- nfs4_increment_seqid(status, sp);
+ /* Confirm the sequence as being established */
+ nfs_confirm_seqid(&sp->so_seqid, status);
+ nfs_increment_open_seqid(status, seqid);
if (status >= 0)
memcpy(stateid, &res.stateid, sizeof(*stateid));
return status;
@@ -380,21 +420,41 @@ static int _nfs4_proc_open(struct inode *dir, struct nfs4_state_owner *sp, stru
int status;
/* Update sequence id. The caller must serialize! */
- o_arg->seqid = sp->so_seqid;
o_arg->id = sp->so_id;
o_arg->clientid = sp->so_client->cl_clientid;
status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- nfs4_increment_seqid(status, sp);
+ if (status == 0) {
+ /* OPEN on anything except a regular file is disallowed in NFSv4 */
+ switch (o_res->f_attr->mode & S_IFMT) {
+ case S_IFREG:
+ break;
+ case S_IFLNK:
+ status = -ELOOP;
+ break;
+ case S_IFDIR:
+ status = -EISDIR;
+ break;
+ default:
+ status = -ENOTDIR;
+ }
+ }
+
+ nfs_increment_open_seqid(status, o_arg->seqid);
if (status != 0)
goto out;
- update_changeattr(dir, &o_res->cinfo);
+ if (o_arg->open_flags & O_CREAT) {
+ update_changeattr(dir, &o_res->cinfo);
+ nfs_post_op_update_inode(dir, o_res->dir_attr);
+ } else
+ nfs_refresh_inode(dir, o_res->dir_attr);
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(server->client, &o_res->fh,
- sp, &o_res->stateid);
+ sp, &o_res->stateid, o_arg->seqid);
if (status != 0)
goto out;
}
+ nfs_confirm_seqid(&sp->so_seqid, 0);
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
status = server->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr);
out:
@@ -441,9 +501,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
struct inode *inode = state->inode;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_delegation *delegation = NFS_I(inode)->delegation;
- struct nfs_fattr f_attr = {
- .valid = 0,
- };
+ struct nfs_fattr f_attr, dir_attr;
struct nfs_openargs o_arg = {
.fh = NFS_FH(dir),
.open_flags = state->state,
@@ -453,6 +511,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
};
struct nfs_openres o_res = {
.f_attr = &f_attr,
+ .dir_attr = &dir_attr,
.server = server,
};
int status = 0;
@@ -465,6 +524,12 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
set_bit(NFS_DELEGATED_STATE, &state->flags);
goto out;
}
+ o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+ status = -ENOMEM;
+ if (o_arg.seqid == NULL)
+ goto out;
+ nfs_fattr_init(&f_attr);
+ nfs_fattr_init(&dir_attr);
status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
if (status != 0)
goto out_nodeleg;
@@ -490,6 +555,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res);
}
out_nodeleg:
+ nfs_free_seqid(o_arg.seqid);
clear_bit(NFS_DELEGATED_STATE, &state->flags);
out:
dput(parent);
@@ -564,7 +630,6 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred
dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__);
goto out_err;
}
- down(&sp->so_sema);
state = nfs4_get_open_state(inode, sp);
if (state == NULL)
goto out_err;
@@ -589,7 +654,6 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred
set_bit(NFS_DELEGATED_STATE, &state->flags);
update_open_stateid(state, &delegation->stateid, open_flags);
out_ok:
- up(&sp->so_sema);
nfs4_put_state_owner(sp);
up_read(&nfsi->rwsem);
up_read(&clp->cl_sem);
@@ -600,11 +664,12 @@ out_err:
if (sp != NULL) {
if (state != NULL)
nfs4_put_open_state(state);
- up(&sp->so_sema);
nfs4_put_state_owner(sp);
}
up_read(&nfsi->rwsem);
up_read(&clp->cl_sem);
+ if (err != -EACCES)
+ nfs_inode_return_delegation(inode);
return err;
}
@@ -635,9 +700,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
struct nfs4_client *clp = server->nfs4_state;
struct inode *inode = NULL;
int status;
- struct nfs_fattr f_attr = {
- .valid = 0,
- };
+ struct nfs_fattr f_attr, dir_attr;
struct nfs_openargs o_arg = {
.fh = NFS_FH(dir),
.open_flags = flags,
@@ -648,6 +711,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
};
struct nfs_openres o_res = {
.f_attr = &f_attr,
+ .dir_attr = &dir_attr,
.server = server,
};
@@ -665,8 +729,12 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
} else
o_arg.u.attrs = sattr;
/* Serialization for the sequence id */
- down(&sp->so_sema);
+ o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+ if (o_arg.seqid == NULL)
+ return -ENOMEM;
+ nfs_fattr_init(&f_attr);
+ nfs_fattr_init(&dir_attr);
status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
if (status != 0)
goto out_err;
@@ -681,7 +749,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
update_open_stateid(state, &o_res.stateid, flags);
if (o_res.delegation_type != 0)
nfs_inode_set_delegation(inode, cred, &o_res);
- up(&sp->so_sema);
+ nfs_free_seqid(o_arg.seqid);
nfs4_put_state_owner(sp);
up_read(&clp->cl_sem);
*res = state;
@@ -690,7 +758,7 @@ out_err:
if (sp != NULL) {
if (state != NULL)
nfs4_put_open_state(state);
- up(&sp->so_sema);
+ nfs_free_seqid(o_arg.seqid);
nfs4_put_state_owner(sp);
}
/* Note: clp->cl_sem must be released before nfs4_put_open_state()! */
@@ -718,7 +786,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry,
* It is actually a sign of a bug on the client or on the server.
*
* If we receive a BAD_SEQID error in the particular case of
- * doing an OPEN, we assume that nfs4_increment_seqid() will
+ * doing an OPEN, we assume that nfs_increment_open_seqid() will
* have unhashed the old state_owner for us, and that we can
* therefore safely retry using a new one. We should still warn
* the user though...
@@ -728,6 +796,16 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry,
exception.retry = 1;
continue;
}
+ /*
+ * BAD_STATEID on OPEN means that the server cancelled our
+ * state before it received the OPEN_CONFIRM.
+ * Recover by retrying the request as per the discussion
+ * on Page 181 of RFC3530.
+ */
+ if (status == -NFS4ERR_BAD_STATEID) {
+ exception.retry = 1;
+ continue;
+ }
res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
status, &exception));
} while (exception.retry);
@@ -755,7 +833,7 @@ static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
};
int status;
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
if (state != NULL) {
msg.rpc_cred = state->owner->so_cred;
@@ -787,19 +865,30 @@ struct nfs4_closedata {
struct nfs4_state *state;
struct nfs_closeargs arg;
struct nfs_closeres res;
+ struct nfs_fattr fattr;
};
+static void nfs4_free_closedata(struct nfs4_closedata *calldata)
+{
+ struct nfs4_state *state = calldata->state;
+ struct nfs4_state_owner *sp = state->owner;
+
+ nfs4_put_open_state(calldata->state);
+ nfs_free_seqid(calldata->arg.seqid);
+ nfs4_put_state_owner(sp);
+ kfree(calldata);
+}
+
static void nfs4_close_done(struct rpc_task *task)
{
struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
struct nfs4_state *state = calldata->state;
- struct nfs4_state_owner *sp = state->owner;
struct nfs_server *server = NFS_SERVER(calldata->inode);
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
*/
- nfs4_increment_seqid(task->tk_status, sp);
+ nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid);
switch (task->tk_status) {
case 0:
memcpy(&state->stateid, &calldata->res.stateid,
@@ -816,25 +905,49 @@ static void nfs4_close_done(struct rpc_task *task)
return;
}
}
+ nfs_refresh_inode(calldata->inode, calldata->res.fattr);
state->state = calldata->arg.open_flags;
- nfs4_put_open_state(state);
- up(&sp->so_sema);
- nfs4_put_state_owner(sp);
- up_read(&server->nfs4_state->cl_sem);
- kfree(calldata);
+ nfs4_free_closedata(calldata);
}
-static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata *calldata)
+static void nfs4_close_begin(struct rpc_task *task)
{
+ struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
+ struct nfs4_state *state = calldata->state;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
.rpc_argp = &calldata->arg,
.rpc_resp = &calldata->res,
- .rpc_cred = calldata->state->owner->so_cred,
+ .rpc_cred = state->owner->so_cred,
};
- if (calldata->arg.open_flags != 0)
+ int mode = 0;
+ int status;
+
+ status = nfs_wait_on_sequence(calldata->arg.seqid, task);
+ if (status != 0)
+ return;
+ /* Don't reorder reads */
+ smp_rmb();
+ /* Recalculate the new open mode in case someone reopened the file
+ * while we were waiting in line to be scheduled.
+ */
+ if (state->nreaders != 0)
+ mode |= FMODE_READ;
+ if (state->nwriters != 0)
+ mode |= FMODE_WRITE;
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags))
+ state->state = mode;
+ if (mode == state->state) {
+ nfs4_free_closedata(calldata);
+ task->tk_exit = NULL;
+ rpc_exit(task, 0);
+ return;
+ }
+ nfs_fattr_init(calldata->res.fattr);
+ if (mode != 0)
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
- return rpc_call_async(clnt, &msg, 0, nfs4_close_done, calldata);
+ calldata->arg.open_flags = mode;
+ rpc_call_setup(task, &msg, 0);
}
/*
@@ -850,40 +963,57 @@ static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata *
*/
int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode)
{
+ struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_closedata *calldata;
- int status;
+ int status = -ENOMEM;
- /* Tell caller we're done */
- if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
- state->state = mode;
- return 0;
- }
- calldata = (struct nfs4_closedata *)kmalloc(sizeof(*calldata), GFP_KERNEL);
+ calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
if (calldata == NULL)
- return -ENOMEM;
+ goto out;
calldata->inode = inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(inode);
+ calldata->arg.stateid = &state->stateid;
/* Serialization for the sequence id */
- calldata->arg.seqid = state->owner->so_seqid;
- calldata->arg.open_flags = mode;
- memcpy(&calldata->arg.stateid, &state->stateid,
- sizeof(calldata->arg.stateid));
- status = nfs4_close_call(NFS_SERVER(inode)->client, calldata);
- /*
- * Return -EINPROGRESS on success in order to indicate to the
- * caller that an asynchronous RPC call has been launched, and
- * that it will release the semaphores on completion.
- */
- return (status == 0) ? -EINPROGRESS : status;
+ calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
+ if (calldata->arg.seqid == NULL)
+ goto out_free_calldata;
+ calldata->arg.bitmask = server->attr_bitmask;
+ calldata->res.fattr = &calldata->fattr;
+ calldata->res.server = server;
+
+ status = nfs4_call_async(server->client, nfs4_close_begin,
+ nfs4_close_done, calldata);
+ if (status == 0)
+ goto out;
+
+ nfs_free_seqid(calldata->arg.seqid);
+out_free_calldata:
+ kfree(calldata);
+out:
+ return status;
}
-struct inode *
+static void nfs4_intent_set_file(struct nameidata *nd, struct dentry *dentry, struct nfs4_state *state)
+{
+ struct file *filp;
+
+ filp = lookup_instantiate_filp(nd, dentry, NULL);
+ if (!IS_ERR(filp)) {
+ struct nfs_open_context *ctx;
+ ctx = (struct nfs_open_context *)filp->private_data;
+ ctx->state = state;
+ } else
+ nfs4_close_state(state, nd->intent.open.flags);
+}
+
+struct dentry *
nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
struct iattr attr;
struct rpc_cred *cred;
struct nfs4_state *state;
+ struct dentry *res;
if (nd->flags & LOOKUP_CREATE) {
attr.ia_mode = nd->intent.open.create_mode;
@@ -897,16 +1027,23 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0);
if (IS_ERR(cred))
- return (struct inode *)cred;
+ return (struct dentry *)cred;
state = nfs4_do_open(dir, dentry, nd->intent.open.flags, &attr, cred);
put_rpccred(cred);
- if (IS_ERR(state))
- return (struct inode *)state;
- return state->inode;
+ if (IS_ERR(state)) {
+ if (PTR_ERR(state) == -ENOENT)
+ d_add(dentry, NULL);
+ return (struct dentry *)state;
+ }
+ res = d_add_unique(dentry, state->inode);
+ if (res != NULL)
+ dentry = res;
+ nfs4_intent_set_file(nd, dentry, state);
+ return res;
}
int
-nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags)
+nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
{
struct rpc_cred *cred;
struct nfs4_state *state;
@@ -919,18 +1056,30 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags)
if (IS_ERR(state))
state = nfs4_do_open(dir, dentry, openflags, NULL, cred);
put_rpccred(cred);
- if (state == ERR_PTR(-ENOENT) && dentry->d_inode == 0)
- return 1;
- if (IS_ERR(state))
- return 0;
+ if (IS_ERR(state)) {
+ switch (PTR_ERR(state)) {
+ case -EPERM:
+ case -EACCES:
+ case -EDQUOT:
+ case -ENOSPC:
+ case -EROFS:
+ lookup_instantiate_filp(nd, (struct dentry *)state, NULL);
+ return 1;
+ case -ENOENT:
+ if (dentry->d_inode == NULL)
+ return 1;
+ }
+ goto out_drop;
+ }
inode = state->inode;
+ iput(inode);
if (inode == dentry->d_inode) {
- iput(inode);
+ nfs4_intent_set_file(nd, dentry, state);
return 1;
}
- d_drop(dentry);
nfs4_close_state(state, openflags);
- iput(inode);
+out_drop:
+ d_drop(dentry);
return 0;
}
@@ -974,13 +1123,12 @@ static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fh
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
- struct nfs_fattr * fattr = info->fattr;
struct nfs4_lookup_root_arg args = {
.bitmask = nfs4_fattr_bitmap,
};
struct nfs4_lookup_res res = {
.server = server,
- .fattr = fattr,
+ .fattr = info->fattr,
.fh = fhandle,
};
struct rpc_message msg = {
@@ -988,7 +1136,7 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_argp = &args,
.rpc_resp = &res,
};
- fattr->valid = 0;
+ nfs_fattr_init(info->fattr);
return rpc_call_sync(server->client, &msg, 0);
}
@@ -1051,7 +1199,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
q.len = p - q.name;
do {
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = nfs4_handle_exception(server,
rpc_call_sync(server->client, &msg, 0),
&exception);
@@ -1088,7 +1236,7 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
return rpc_call_sync(server->client, &msg, 0);
}
@@ -1130,7 +1278,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct nfs4_state *state;
int status;
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
if (IS_ERR(cred))
@@ -1176,7 +1324,7 @@ static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name,
.rpc_resp = &res,
};
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
dprintk("NFS call lookup %s\n", name->name);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
@@ -1325,7 +1473,7 @@ static int _nfs4_proc_read(struct nfs_read_data *rdata)
dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
(long long) rdata->args.offset);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, flags);
if (!status)
renew_lease(server, timestamp);
@@ -1362,7 +1510,7 @@ static int _nfs4_proc_write(struct nfs_write_data *wdata)
dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
(long long) wdata->args.offset);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, rpcflags);
dprintk("NFS reply write: %d\n", status);
return status;
@@ -1396,7 +1544,7 @@ static int _nfs4_proc_commit(struct nfs_write_data *cdata)
dprintk("NFS call commit %d @ %Ld\n", cdata->args.count,
(long long) cdata->args.offset);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, 0);
dprintk("NFS reply commit: %d\n", status);
return status;
@@ -1431,7 +1579,7 @@ static int nfs4_proc_commit(struct nfs_write_data *cdata)
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags)
+ int flags, struct nameidata *nd)
{
struct nfs4_state *state;
struct rpc_cred *cred;
@@ -1453,24 +1601,30 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
struct nfs_fattr fattr;
status = nfs4_do_setattr(NFS_SERVER(dir), &fattr,
NFS_FH(state->inode), sattr, state);
- if (status == 0) {
+ if (status == 0)
nfs_setattr_update_inode(state->inode, sattr);
- goto out;
- }
- } else if (flags != 0)
- goto out;
- nfs4_close_state(state, flags);
+ }
+ if (status == 0 && nd != NULL && (nd->flags & LOOKUP_OPEN))
+ nfs4_intent_set_file(nd, dentry, state);
+ else
+ nfs4_close_state(state, flags);
out:
return status;
}
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
+ struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_remove_arg args = {
.fh = NFS_FH(dir),
.name = name,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs_fattr dir_attr;
+ struct nfs4_remove_res res = {
+ .server = server,
+ .dir_attr = &dir_attr,
};
- struct nfs4_change_info res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
.rpc_argp = &args,
@@ -1478,9 +1632,12 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
};
int status;
- status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
- if (status == 0)
- update_changeattr(dir, &res);
+ nfs_fattr_init(res.dir_attr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ if (status == 0) {
+ update_changeattr(dir, &res.cinfo);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ }
return status;
}
@@ -1498,12 +1655,14 @@ static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
struct unlink_desc {
struct nfs4_remove_arg args;
- struct nfs4_change_info res;
+ struct nfs4_remove_res res;
+ struct nfs_fattr dir_attr;
};
static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
struct qstr *name)
{
+ struct nfs_server *server = NFS_SERVER(dir->d_inode);
struct unlink_desc *up;
up = (struct unlink_desc *) kmalloc(sizeof(*up), GFP_KERNEL);
@@ -1512,6 +1671,9 @@ static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
up->args.fh = NFS_FH(dir->d_inode);
up->args.name = name;
+ up->args.bitmask = server->attr_bitmask;
+ up->res.server = server;
+ up->res.dir_attr = &up->dir_attr;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
msg->rpc_argp = &up->args;
@@ -1526,7 +1688,8 @@ static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
if (msg->rpc_resp != NULL) {
up = container_of(msg->rpc_resp, struct unlink_desc, res);
- update_changeattr(dir->d_inode, &up->res);
+ update_changeattr(dir->d_inode, &up->res.cinfo);
+ nfs_post_op_update_inode(dir->d_inode, up->res.dir_attr);
kfree(up);
msg->rpc_resp = NULL;
msg->rpc_argp = NULL;
@@ -1537,13 +1700,20 @@ static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
+ struct nfs_server *server = NFS_SERVER(old_dir);
struct nfs4_rename_arg arg = {
.old_dir = NFS_FH(old_dir),
.new_dir = NFS_FH(new_dir),
.old_name = old_name,
.new_name = new_name,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs_fattr old_fattr, new_fattr;
+ struct nfs4_rename_res res = {
+ .server = server,
+ .old_fattr = &old_fattr,
+ .new_fattr = &new_fattr,
};
- struct nfs4_rename_res res = { };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
.rpc_argp = &arg,
@@ -1551,11 +1721,15 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
};
int status;
- status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
+ nfs_fattr_init(res.old_fattr);
+ nfs_fattr_init(res.new_fattr);
+ status = rpc_call_sync(server->client, &msg, 0);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
+ nfs_post_op_update_inode(old_dir, res.old_fattr);
update_changeattr(new_dir, &res.new_cinfo);
+ nfs_post_op_update_inode(new_dir, res.new_fattr);
}
return status;
}
@@ -1576,22 +1750,34 @@ static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
+ struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_link_arg arg = {
.fh = NFS_FH(inode),
.dir_fh = NFS_FH(dir),
.name = name,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs_fattr fattr, dir_attr;
+ struct nfs4_link_res res = {
+ .server = server,
+ .fattr = &fattr,
+ .dir_attr = &dir_attr,
};
- struct nfs4_change_info cinfo = { };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
.rpc_argp = &arg,
- .rpc_resp = &cinfo,
+ .rpc_resp = &res,
};
int status;
- status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
- if (!status)
- update_changeattr(dir, &cinfo);
+ nfs_fattr_init(res.fattr);
+ nfs_fattr_init(res.dir_attr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ if (!status) {
+ update_changeattr(dir, &res.cinfo);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_refresh_inode(inode, res.fattr);
+ }
return status;
}
@@ -1613,6 +1799,7 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name,
struct nfs_fattr *fattr)
{
struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_fattr dir_fattr;
struct nfs4_create_arg arg = {
.dir_fh = NFS_FH(dir),
.server = server,
@@ -1625,6 +1812,7 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name,
.server = server,
.fh = fhandle,
.fattr = fattr,
+ .dir_fattr = &dir_fattr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK],
@@ -1636,11 +1824,13 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name,
if (path->len > NFS4_MAXPATHLEN)
return -ENAMETOOLONG;
arg.u.symlink = path;
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
+ nfs_fattr_init(&dir_fattr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
if (!status)
update_changeattr(dir, &res.dir_cinfo);
+ nfs_post_op_update_inode(dir, res.dir_fattr);
return status;
}
@@ -1664,7 +1854,7 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_fh fhandle;
- struct nfs_fattr fattr;
+ struct nfs_fattr fattr, dir_fattr;
struct nfs4_create_arg arg = {
.dir_fh = NFS_FH(dir),
.server = server,
@@ -1677,6 +1867,7 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
.server = server,
.fh = &fhandle,
.fattr = &fattr,
+ .dir_fattr = &dir_fattr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
@@ -1685,11 +1876,13 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
};
int status;
- fattr.valid = 0;
+ nfs_fattr_init(&fattr);
+ nfs_fattr_init(&dir_fattr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
if (!status) {
update_changeattr(dir, &res.dir_cinfo);
+ nfs_post_op_update_inode(dir, res.dir_fattr);
status = nfs_instantiate(dentry, &fhandle, &fattr);
}
return status;
@@ -1762,7 +1955,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_fh fh;
- struct nfs_fattr fattr;
+ struct nfs_fattr fattr, dir_fattr;
struct nfs4_create_arg arg = {
.dir_fh = NFS_FH(dir),
.server = server,
@@ -1774,6 +1967,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
.server = server,
.fh = &fh,
.fattr = &fattr,
+ .dir_fattr = &dir_fattr,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
@@ -1783,7 +1977,8 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
int status;
int mode = sattr->ia_mode;
- fattr.valid = 0;
+ nfs_fattr_init(&fattr);
+ nfs_fattr_init(&dir_fattr);
BUG_ON(!(sattr->ia_valid & ATTR_MODE));
BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
@@ -1805,6 +2000,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
if (status == 0) {
update_changeattr(dir, &res.dir_cinfo);
+ nfs_post_op_update_inode(dir, res.dir_fattr);
status = nfs_instantiate(dentry, &fh, &fattr);
}
return status;
@@ -1836,7 +2032,7 @@ static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = fsstat,
};
- fsstat->fattr->valid = 0;
+ nfs_fattr_init(fsstat->fattr);
return rpc_call_sync(server->client, &msg, 0);
}
@@ -1883,7 +2079,7 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
- fsinfo->fattr->valid = 0;
+ nfs_fattr_init(fsinfo->fattr);
return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
@@ -1906,7 +2102,7 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle
return 0;
}
- pathconf->fattr->valid = 0;
+ nfs_fattr_init(pathconf->fattr);
return rpc_call_sync(server->client, &msg, 0);
}
@@ -1973,8 +2169,10 @@ nfs4_write_done(struct rpc_task *task)
rpc_restart_call(task);
return;
}
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
renew_lease(NFS_SERVER(inode), data->timestamp);
+ nfs_post_op_update_inode(inode, data->res.fattr);
+ }
/* Call back common NFS writeback processing */
nfs_writeback_done(task);
}
@@ -1990,6 +2188,7 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how)
.rpc_cred = data->cred,
};
struct inode *inode = data->inode;
+ struct nfs_server *server = NFS_SERVER(inode);
int stable;
int flags;
@@ -2001,6 +2200,8 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how)
} else
stable = NFS_UNSTABLE;
data->args.stable = stable;
+ data->args.bitmask = server->attr_bitmask;
+ data->res.server = server;
data->timestamp = jiffies;
@@ -2022,6 +2223,8 @@ nfs4_commit_done(struct rpc_task *task)
rpc_restart_call(task);
return;
}
+ if (task->tk_status >= 0)
+ nfs_post_op_update_inode(inode, data->res.fattr);
/* Call back common NFS writeback processing */
nfs_commit_done(task);
}
@@ -2037,8 +2240,12 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
.rpc_cred = data->cred,
};
struct inode *inode = data->inode;
+ struct nfs_server *server = NFS_SERVER(inode);
int flags;
+ data->args.bitmask = server->attr_bitmask;
+ data->res.server = server;
+
/* Set the initial flags for the task. */
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
@@ -2106,65 +2313,6 @@ nfs4_proc_renew(struct nfs4_client *clp)
return 0;
}
-/*
- * We will need to arrange for the VFS layer to provide an atomic open.
- * Until then, this open method is prone to inefficiency and race conditions
- * due to the lookup, potential create, and open VFS calls from sys_open()
- * placed on the wire.
- */
-static int
-nfs4_proc_file_open(struct inode *inode, struct file *filp)
-{
- struct dentry *dentry = filp->f_dentry;
- struct nfs_open_context *ctx;
- struct nfs4_state *state = NULL;
- struct rpc_cred *cred;
- int status = -ENOMEM;
-
- dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n",
- (int)dentry->d_parent->d_name.len,
- dentry->d_parent->d_name.name,
- (int)dentry->d_name.len, dentry->d_name.name);
-
-
- /* Find our open stateid */
- cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
- if (IS_ERR(cred))
- return PTR_ERR(cred);
- ctx = alloc_nfs_open_context(dentry, cred);
- put_rpccred(cred);
- if (unlikely(ctx == NULL))
- return -ENOMEM;
- status = -EIO; /* ERACE actually */
- state = nfs4_find_state(inode, cred, filp->f_mode);
- if (unlikely(state == NULL))
- goto no_state;
- ctx->state = state;
- nfs4_close_state(state, filp->f_mode);
- ctx->mode = filp->f_mode;
- nfs_file_set_open_context(filp, ctx);
- put_nfs_open_context(ctx);
- if (filp->f_mode & FMODE_WRITE)
- nfs_begin_data_update(inode);
- return 0;
-no_state:
- printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__);
- put_nfs_open_context(ctx);
- return status;
-}
-
-/*
- * Release our state
- */
-static int
-nfs4_proc_file_release(struct inode *inode, struct file *filp)
-{
- if (filp->f_mode & FMODE_WRITE)
- nfs_end_data_update(inode);
- nfs_file_clear_open_context(filp);
- return 0;
-}
-
static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
return (server->caps & NFS_CAP_ACLS)
@@ -2285,7 +2433,7 @@ static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size
return -ENOMEM;
args.acl_pages[0] = localpage;
args.acl_pgbase = 0;
- args.acl_len = PAGE_SIZE;
+ resp_len = args.acl_len = PAGE_SIZE;
} else {
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
@@ -2345,6 +2493,7 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
+ nfs_inode_return_delegation(inode);
buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0);
if (ret == 0)
@@ -2353,7 +2502,7 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
}
static int
-nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server)
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
{
struct nfs4_client *clp = server->nfs4_state;
@@ -2431,7 +2580,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
-int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
+int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
struct nfs4_client *clp = server->nfs4_state;
int ret = errorcode;
@@ -2632,7 +2781,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
down_read(&clp->cl_sem);
nlo.clientid = clp->cl_clientid;
- down(&state->lock_sema);
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
@@ -2659,7 +2807,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
status = 0;
}
out:
- up(&state->lock_sema);
up_read(&clp->cl_sem);
return status;
}
@@ -2696,79 +2843,149 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
return res;
}
-static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
+struct nfs4_unlockdata {
+ struct nfs_lockargs arg;
+ struct nfs_locku_opargs luargs;
+ struct nfs_lockres res;
+ struct nfs4_lock_state *lsp;
+ struct nfs_open_context *ctx;
+ atomic_t refcount;
+ struct completion completion;
+};
+
+static void nfs4_locku_release_calldata(struct nfs4_unlockdata *calldata)
{
- struct inode *inode = state->inode;
- struct nfs_server *server = NFS_SERVER(inode);
- struct nfs4_client *clp = server->nfs4_state;
- struct nfs_lockargs arg = {
- .fh = NFS_FH(inode),
- .type = nfs4_lck_type(cmd, request),
- .offset = request->fl_start,
- .length = nfs4_lck_length(request),
- };
- struct nfs_lockres res = {
- .server = server,
- };
+ if (atomic_dec_and_test(&calldata->refcount)) {
+ nfs_free_seqid(calldata->luargs.seqid);
+ nfs4_put_lock_state(calldata->lsp);
+ put_nfs_open_context(calldata->ctx);
+ kfree(calldata);
+ }
+}
+
+static void nfs4_locku_complete(struct nfs4_unlockdata *calldata)
+{
+ complete(&calldata->completion);
+ nfs4_locku_release_calldata(calldata);
+}
+
+static void nfs4_locku_done(struct rpc_task *task)
+{
+ struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
+
+ nfs_increment_lock_seqid(task->tk_status, calldata->luargs.seqid);
+ switch (task->tk_status) {
+ case 0:
+ memcpy(calldata->lsp->ls_stateid.data,
+ calldata->res.u.stateid.data,
+ sizeof(calldata->lsp->ls_stateid.data));
+ break;
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ nfs4_schedule_state_recovery(calldata->res.server->nfs4_state);
+ break;
+ default:
+ if (nfs4_async_handle_error(task, calldata->res.server) == -EAGAIN) {
+ rpc_restart_call(task);
+ return;
+ }
+ }
+ nfs4_locku_complete(calldata);
+}
+
+static void nfs4_locku_begin(struct rpc_task *task)
+{
+ struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
- .rpc_argp = &arg,
- .rpc_resp = &res,
- .rpc_cred = state->owner->so_cred,
+ .rpc_argp = &calldata->arg,
+ .rpc_resp = &calldata->res,
+ .rpc_cred = calldata->lsp->ls_state->owner->so_cred,
};
+ int status;
+
+ status = nfs_wait_on_sequence(calldata->luargs.seqid, task);
+ if (status != 0)
+ return;
+ if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
+ nfs4_locku_complete(calldata);
+ task->tk_exit = NULL;
+ rpc_exit(task, 0);
+ return;
+ }
+ rpc_call_setup(task, &msg, 0);
+}
+
+static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ struct nfs4_unlockdata *calldata;
+ struct inode *inode = state->inode;
+ struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_lock_state *lsp;
- struct nfs_locku_opargs luargs;
int status;
-
- down_read(&clp->cl_sem);
- down(&state->lock_sema);
+
status = nfs4_set_lock_state(state, request);
if (status != 0)
- goto out;
+ return status;
lsp = request->fl_u.nfs4_fl.owner;
/* We might have lost the locks! */
if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
- goto out;
- luargs.seqid = lsp->ls_seqid;
- memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
- arg.u.locku = &luargs;
- status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- nfs4_increment_lock_seqid(status, lsp);
-
- if (status == 0)
- memcpy(&lsp->ls_stateid, &res.u.stateid,
- sizeof(lsp->ls_stateid));
-out:
- up(&state->lock_sema);
+ return 0;
+ calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
+ if (calldata == NULL)
+ return -ENOMEM;
+ calldata->luargs.seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+ if (calldata->luargs.seqid == NULL) {
+ kfree(calldata);
+ return -ENOMEM;
+ }
+ calldata->luargs.stateid = &lsp->ls_stateid;
+ calldata->arg.fh = NFS_FH(inode);
+ calldata->arg.type = nfs4_lck_type(cmd, request);
+ calldata->arg.offset = request->fl_start;
+ calldata->arg.length = nfs4_lck_length(request);
+ calldata->arg.u.locku = &calldata->luargs;
+ calldata->res.server = server;
+ calldata->lsp = lsp;
+ atomic_inc(&lsp->ls_count);
+
+ /* Ensure we don't close file until we're done freeing locks! */
+ calldata->ctx = get_nfs_open_context((struct nfs_open_context*)request->fl_file->private_data);
+
+ atomic_set(&calldata->refcount, 2);
+ init_completion(&calldata->completion);
+
+ status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_locku_begin,
+ nfs4_locku_done, calldata);
if (status == 0)
- do_vfs_lock(request->fl_file, request);
- up_read(&clp->cl_sem);
+ wait_for_completion_interruptible(&calldata->completion);
+ do_vfs_lock(request->fl_file, request);
+ nfs4_locku_release_calldata(calldata);
return status;
}
-static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
-{
- struct nfs4_exception exception = { };
- int err;
-
- do {
- err = nfs4_handle_exception(NFS_SERVER(state->inode),
- _nfs4_proc_unlck(state, cmd, request),
- &exception);
- } while (exception.retry);
- return err;
-}
-
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim)
{
struct inode *inode = state->inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
+ struct nfs_lock_opargs largs = {
+ .lock_stateid = &lsp->ls_stateid,
+ .open_stateid = &state->stateid,
+ .lock_owner = {
+ .clientid = server->nfs4_state->cl_clientid,
+ .id = lsp->ls_id,
+ },
+ .reclaim = reclaim,
+ };
struct nfs_lockargs arg = {
.fh = NFS_FH(inode),
.type = nfs4_lck_type(cmd, request),
.offset = request->fl_start,
.length = nfs4_lck_length(request),
+ .u = {
+ .lock = &largs,
+ },
};
struct nfs_lockres res = {
.server = server,
@@ -2779,53 +2996,39 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
.rpc_resp = &res,
.rpc_cred = state->owner->so_cred,
};
- struct nfs_lock_opargs largs = {
- .reclaim = reclaim,
- .new_lock_owner = 0,
- };
- int status;
+ int status = -ENOMEM;
- if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {
+ largs.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+ if (largs.lock_seqid == NULL)
+ return -ENOMEM;
+ if (!(lsp->ls_seqid.flags & NFS_SEQID_CONFIRMED)) {
struct nfs4_state_owner *owner = state->owner;
- struct nfs_open_to_lock otl = {
- .lock_owner = {
- .clientid = server->nfs4_state->cl_clientid,
- },
- };
-
- otl.lock_seqid = lsp->ls_seqid;
- otl.lock_owner.id = lsp->ls_id;
- memcpy(&otl.open_stateid, &state->stateid, sizeof(otl.open_stateid));
- largs.u.open_lock = &otl;
+
+ largs.open_seqid = nfs_alloc_seqid(&owner->so_seqid);
+ if (largs.open_seqid == NULL)
+ goto out;
largs.new_lock_owner = 1;
- arg.u.lock = &largs;
- down(&owner->so_sema);
- otl.open_seqid = owner->so_seqid;
status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- /* increment open_owner seqid on success, and
- * seqid mutating errors */
- nfs4_increment_seqid(status, owner);
- up(&owner->so_sema);
- if (status == 0) {
- lsp->ls_flags |= NFS_LOCK_INITIALIZED;
- lsp->ls_seqid++;
+ /* increment open seqid on success, and seqid mutating errors */
+ if (largs.new_lock_owner != 0) {
+ nfs_increment_open_seqid(status, largs.open_seqid);
+ if (status == 0)
+ nfs_confirm_seqid(&lsp->ls_seqid, 0);
}
- } else {
- struct nfs_exist_lock el = {
- .seqid = lsp->ls_seqid,
- };
- memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));
- largs.u.exist_lock = &el;
- arg.u.lock = &largs;
+ nfs_free_seqid(largs.open_seqid);
+ } else
status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- /* increment seqid on success, and * seqid mutating errors*/
- nfs4_increment_lock_seqid(status, lsp);
- }
+ /* increment lock seqid on success, and seqid mutating errors*/
+ nfs_increment_lock_seqid(status, largs.lock_seqid);
/* save the returned stateid. */
- if (status == 0)
- memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
- else if (status == -NFS4ERR_DENIED)
+ if (status == 0) {
+ memcpy(lsp->ls_stateid.data, res.u.stateid.data,
+ sizeof(lsp->ls_stateid.data));
+ lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+ } else if (status == -NFS4ERR_DENIED)
status = -EAGAIN;
+out:
+ nfs_free_seqid(largs.lock_seqid);
return status;
}
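Note: for synchronous requests such as _nfs4_do_setlk() above, the nfs_seqid object replaces the old bare counter plus semaphore: allocate it, issue the RPC, bump it, free it. Serialization against other users of the same counter happens on the encode side via nfs_wait_on_sequence() (see nfs4_xdr_enc_lock() later in this patch). A condensed sketch using only the helpers introduced here; the wrapper name is a placeholder, and in the real code the seqid is also stored in the request arguments so the XDR layer can encode seqid->sequence->counter:

/* Sketch only: the synchronous seqid lifecycle distilled from _nfs4_do_setlk(). */
static int nfs4_sync_lock_rpc_sketch(struct nfs_server *server,
				     struct nfs4_lock_state *lsp,
				     struct rpc_message *msg)
{
	struct nfs_seqid *seqid;
	int status;

	seqid = nfs_alloc_seqid(&lsp->ls_seqid);
	if (seqid == NULL)
		return -ENOMEM;
	status = rpc_call_sync(server->client, msg, RPC_TASK_NOINTR);
	/* bumps the counter on success and on seqid-mutating errors only */
	nfs_increment_lock_seqid(status, seqid);
	nfs_free_seqid(seqid);
	return status;
}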
@@ -2865,11 +3068,9 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
int status;
down_read(&clp->cl_sem);
- down(&state->lock_sema);
status = nfs4_set_lock_state(state, request);
if (status == 0)
status = _nfs4_do_setlk(state, cmd, request, 0);
- up(&state->lock_sema);
if (status == 0) {
/* Note: we always want to sleep here! */
request->fl_flags |= FL_SLEEP;
@@ -3024,8 +3225,8 @@ struct nfs_rpc_ops nfs_v4_clientops = {
.read_setup = nfs4_proc_read_setup,
.write_setup = nfs4_proc_write_setup,
.commit_setup = nfs4_proc_commit_setup,
- .file_open = nfs4_proc_file_open,
- .file_release = nfs4_proc_file_release,
+ .file_open = nfs_open,
+ .file_release = nfs_release,
.lock = nfs4_proc_lock,
.clear_acl_cache = nfs4_zap_acl_attr,
};
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index afe587d82f1..2d5a6a2b9de 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -264,13 +264,16 @@ nfs4_alloc_state_owner(void)
{
struct nfs4_state_owner *sp;
- sp = kmalloc(sizeof(*sp),GFP_KERNEL);
+ sp = kzalloc(sizeof(*sp),GFP_KERNEL);
if (!sp)
return NULL;
- init_MUTEX(&sp->so_sema);
- sp->so_seqid = 0; /* arbitrary */
+ spin_lock_init(&sp->so_lock);
INIT_LIST_HEAD(&sp->so_states);
INIT_LIST_HEAD(&sp->so_delegations);
+ rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
+ sp->so_seqid.sequence = &sp->so_sequence;
+ spin_lock_init(&sp->so_sequence.lock);
+ INIT_LIST_HEAD(&sp->so_sequence.list);
atomic_set(&sp->so_count, 1);
return sp;
}
@@ -359,7 +362,6 @@ nfs4_alloc_open_state(void)
memset(state->stateid.data, 0, sizeof(state->stateid.data));
atomic_set(&state->count, 1);
INIT_LIST_HEAD(&state->lock_states);
- init_MUTEX(&state->lock_sema);
spin_lock_init(&state->state_lock);
return state;
}
@@ -437,21 +439,23 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
if (state)
goto out;
new = nfs4_alloc_open_state();
+ spin_lock(&owner->so_lock);
spin_lock(&inode->i_lock);
state = __nfs4_find_state_byowner(inode, owner);
if (state == NULL && new != NULL) {
state = new;
- /* Caller *must* be holding owner->so_sem */
- /* Note: The reclaim code dictates that we add stateless
- * and read-only stateids to the end of the list */
- list_add_tail(&state->open_states, &owner->so_states);
state->owner = owner;
atomic_inc(&owner->so_count);
list_add(&state->inode_states, &nfsi->open_states);
state->inode = igrab(inode);
spin_unlock(&inode->i_lock);
+ /* Note: The reclaim code dictates that we add stateless
+ * and read-only stateids to the end of the list */
+ list_add_tail(&state->open_states, &owner->so_states);
+ spin_unlock(&owner->so_lock);
} else {
spin_unlock(&inode->i_lock);
+ spin_unlock(&owner->so_lock);
if (new)
nfs4_free_open_state(new);
}
@@ -461,19 +465,21 @@ out:
/*
* Beware! Caller must be holding exactly one
- * reference to clp->cl_sem and owner->so_sema!
+ * reference to clp->cl_sem!
*/
void nfs4_put_open_state(struct nfs4_state *state)
{
struct inode *inode = state->inode;
struct nfs4_state_owner *owner = state->owner;
- if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
+ if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
return;
+ spin_lock(&inode->i_lock);
if (!list_empty(&state->inode_states))
list_del(&state->inode_states);
- spin_unlock(&inode->i_lock);
list_del(&state->open_states);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&owner->so_lock);
iput(inode);
BUG_ON (state->state != 0);
nfs4_free_open_state(state);
@@ -481,20 +487,17 @@ void nfs4_put_open_state(struct nfs4_state *state)
}
/*
- * Beware! Caller must be holding no references to clp->cl_sem!
- * of owner->so_sema!
+ * Close the current file.
*/
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
struct inode *inode = state->inode;
struct nfs4_state_owner *owner = state->owner;
- struct nfs4_client *clp = owner->so_client;
int newstate;
atomic_inc(&owner->so_count);
- down_read(&clp->cl_sem);
- down(&owner->so_sema);
/* Protect against nfs4_find_state() */
+ spin_lock(&owner->so_lock);
spin_lock(&inode->i_lock);
if (mode & FMODE_READ)
state->nreaders--;
@@ -507,6 +510,7 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
list_move_tail(&state->open_states, &owner->so_states);
}
spin_unlock(&inode->i_lock);
+ spin_unlock(&owner->so_lock);
newstate = 0;
if (state->state != 0) {
if (state->nreaders)
@@ -515,14 +519,16 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
newstate |= FMODE_WRITE;
if (state->state == newstate)
goto out;
- if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+ state->state = newstate;
+ goto out;
+ }
+ if (nfs4_do_close(inode, state, newstate) == 0)
return;
}
out:
nfs4_put_open_state(state);
- up(&owner->so_sema);
nfs4_put_state_owner(owner);
- up_read(&clp->cl_sem);
}
/*
@@ -546,19 +552,16 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
*
- * The caller must be holding state->lock_sema
*/
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
struct nfs4_lock_state *lsp;
struct nfs4_client *clp = state->owner->so_client;
- lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
+ lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
if (lsp == NULL)
return NULL;
- lsp->ls_flags = 0;
- lsp->ls_seqid = 0; /* arbitrary */
- memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
+ lsp->ls_seqid.sequence = &state->owner->so_sequence;
atomic_set(&lsp->ls_count, 1);
lsp->ls_owner = fl_owner;
spin_lock(&clp->cl_lock);
@@ -572,7 +575,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
*
- * The caller must be holding state->lock_sema and clp->cl_sem
+ * The caller must be holding clp->cl_sem
*/
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
@@ -605,7 +608,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
* Release reference to lock_state, and free it if we see that
* it is no longer in use
*/
-static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
struct nfs4_state *state;
@@ -673,29 +676,94 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
nfs4_put_lock_state(lsp);
}
-/*
-* Called with state->lock_sema and clp->cl_sem held.
-*/
-void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
- if (status == NFS_OK || seqid_mutating_err(-status))
- lsp->ls_seqid++;
+ struct nfs_seqid *new;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (new != NULL) {
+ new->sequence = counter;
+ INIT_LIST_HEAD(&new->list);
+ }
+ return new;
+}
+
+void nfs_free_seqid(struct nfs_seqid *seqid)
+{
+ struct rpc_sequence *sequence = seqid->sequence->sequence;
+
+ if (!list_empty(&seqid->list)) {
+ spin_lock(&sequence->lock);
+ list_del(&seqid->list);
+ spin_unlock(&sequence->lock);
+ }
+ rpc_wake_up_next(&sequence->wait);
+ kfree(seqid);
}
/*
-* Called with sp->so_sema and clp->cl_sem held.
-*
-* Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
-* failed with a seqid incrementing error -
-* see comments nfs_fs.h:seqid_mutating_error()
-*/
-void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
-{
- if (status == NFS_OK || seqid_mutating_err(-status))
- sp->so_seqid++;
- /* If the server returns BAD_SEQID, unhash state_owner here */
- if (status == -NFS4ERR_BAD_SEQID)
+ * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
+ * failed with a seqid incrementing error -
+ * see comments nfs_fs.h:seqid_mutating_error()
+ */
+static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
+{
+ switch (status) {
+ case 0:
+ break;
+ case -NFS4ERR_BAD_SEQID:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_BADXDR:
+ case -NFS4ERR_RESOURCE:
+ case -NFS4ERR_NOFILEHANDLE:
+ /* Non-seqid mutating errors */
+ return;
+ };
+ /*
+ * Note: no locking needed as we are guaranteed to be first
+ * on the sequence list
+ */
+ seqid->sequence->counter++;
+}
+
+void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
+{
+ if (status == -NFS4ERR_BAD_SEQID) {
+ struct nfs4_state_owner *sp = container_of(seqid->sequence,
+ struct nfs4_state_owner, so_seqid);
nfs4_drop_state_owner(sp);
+ }
+ return nfs_increment_seqid(status, seqid);
+}
+
+/*
+ * Increment the seqid if the LOCK/LOCKU succeeded, or
+ * failed with a seqid incrementing error -
+ * see comments nfs_fs.h:seqid_mutating_error()
+ */
+void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
+{
+ return nfs_increment_seqid(status, seqid);
+}
+
+int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
+{
+ struct rpc_sequence *sequence = seqid->sequence->sequence;
+ int status = 0;
+
+ if (sequence->list.next == &seqid->list)
+ goto out;
+ spin_lock(&sequence->lock);
+ if (!list_empty(&sequence->list)) {
+ rpc_sleep_on(&sequence->wait, task, NULL, NULL);
+ status = -EAGAIN;
+ } else
+ list_add(&seqid->list, &sequence->list);
+ spin_unlock(&sequence->lock);
+out:
+ return status;
}
static int reclaimer(void *);
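Note: the helpers above give each state owner a small FIFO: nfs_wait_on_sequence() lets the first queued seqid proceed, puts any later rpc_task to sleep on the owner's wait queue and returns -EAGAIN, and nfs_free_seqid() unlinks a finished seqid and wakes the next waiter. The asynchronous callers in nfs4proc.c consume this from their setup callback; the sketch below is nfs4_close_begin() from earlier in this patch, reduced to the sequencing steps (not additional patch content):

static void nfs4_close_begin_sketch(struct rpc_task *task)
{
	struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
		.rpc_argp = &calldata->arg,
		.rpc_resp = &calldata->res,
		.rpc_cred = calldata->state->owner->so_cred,
	};

	/* Not our turn yet: the task sleeps here and this callback is re-run
	 * once an earlier request calls nfs_free_seqid(). */
	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;
	/* Our turn: bind the COMPOUND to the task and send it. */
	rpc_call_setup(task, &msg, 0);
}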
@@ -791,8 +859,6 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n
if (state->state == 0)
continue;
status = ops->recover_open(sp, state);
- list_for_each_entry(lock, &state->lock_states, ls_locks)
- lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
if (status >= 0) {
status = nfs4_reclaim_locks(ops, state);
if (status < 0)
@@ -831,6 +897,28 @@ out_err:
return status;
}
+static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
+{
+ struct nfs4_state_owner *sp;
+ struct nfs4_state *state;
+ struct nfs4_lock_state *lock;
+
+ /* Reset all sequence ids to zero */
+ list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+ sp->so_seqid.counter = 0;
+ sp->so_seqid.flags = 0;
+ spin_lock(&sp->so_lock);
+ list_for_each_entry(state, &sp->so_states, open_states) {
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ lock->ls_seqid.counter = 0;
+ lock->ls_seqid.flags = 0;
+ lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
+ }
+ }
+ spin_unlock(&sp->so_lock);
+ }
+}
+
static int reclaimer(void *ptr)
{
struct reclaimer_args *args = (struct reclaimer_args *)ptr;
@@ -864,6 +952,7 @@ restart_loop:
default:
ops = &nfs4_network_partition_recovery_ops;
};
+ nfs4_state_mark_reclaim(clp);
status = __nfs4_init_client(clp);
if (status)
goto out_error;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 6c564ef9489..fbbace8a30c 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -95,6 +95,8 @@ static int nfs_stat_to_errno(int);
#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
#define encode_savefh_maxsz (op_encode_hdr_maxsz)
#define decode_savefh_maxsz (op_decode_hdr_maxsz)
+#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
+#define decode_restorefh_maxsz (op_decode_hdr_maxsz)
#define encode_fsinfo_maxsz (op_encode_hdr_maxsz + 2)
#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 11)
#define encode_renew_maxsz (op_encode_hdr_maxsz + 3)
@@ -157,16 +159,20 @@ static int nfs_stat_to_errno(int);
op_decode_hdr_maxsz + 2)
#define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 8)
+ op_encode_hdr_maxsz + 8 + \
+ encode_getattr_maxsz)
#define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 4)
+ op_decode_hdr_maxsz + 4 + \
+ decode_getattr_maxsz)
#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 3)
+ op_encode_hdr_maxsz + 3 + \
+ encode_getattr_maxsz)
#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 2)
+ op_decode_hdr_maxsz + 2 + \
+ decode_getattr_maxsz)
#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
op_encode_hdr_maxsz + \
@@ -196,17 +202,21 @@ static int nfs_stat_to_errno(int);
#define NFS4_enc_open_downgrade_sz \
(compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 7)
+ op_encode_hdr_maxsz + 7 + \
+ encode_getattr_maxsz)
#define NFS4_dec_open_downgrade_sz \
(compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 4)
+ op_decode_hdr_maxsz + 4 + \
+ decode_getattr_maxsz)
#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- op_encode_hdr_maxsz + 5)
+ op_encode_hdr_maxsz + 5 + \
+ encode_getattr_maxsz)
#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 4)
+ op_decode_hdr_maxsz + 4 + \
+ decode_getattr_maxsz)
#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
op_encode_hdr_maxsz + 4 + \
@@ -300,30 +310,44 @@ static int nfs_stat_to_errno(int);
decode_getfh_maxsz)
#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_remove_maxsz)
+ encode_remove_maxsz + \
+ encode_getattr_maxsz)
#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 5)
+ op_decode_hdr_maxsz + 5 + \
+ decode_getattr_maxsz)
#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
- encode_rename_maxsz)
+ encode_rename_maxsz + \
+ encode_getattr_maxsz + \
+ encode_restorefh_maxsz + \
+ encode_getattr_maxsz)
#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
- decode_rename_maxsz)
+ decode_rename_maxsz + \
+ decode_getattr_maxsz + \
+ decode_restorefh_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
- encode_link_maxsz)
+ encode_link_maxsz + \
+ decode_getattr_maxsz + \
+ encode_restorefh_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
- decode_link_maxsz)
+ decode_link_maxsz + \
+ decode_getattr_maxsz + \
+ decode_restorefh_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_symlink_maxsz + \
@@ -336,14 +360,20 @@ static int nfs_stat_to_errno(int);
decode_getfh_maxsz)
#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
+ encode_savefh_maxsz + \
encode_create_maxsz + \
+ encode_getfh_maxsz + \
encode_getattr_maxsz + \
- encode_getfh_maxsz)
+ encode_restorefh_maxsz + \
+ encode_getattr_maxsz)
#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
decode_putfh_maxsz + \
+ decode_savefh_maxsz + \
decode_create_maxsz + \
+ decode_getfh_maxsz + \
decode_getattr_maxsz + \
- decode_getfh_maxsz)
+ decode_restorefh_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
@@ -602,10 +632,10 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg)
{
uint32_t *p;
- RESERVE_SPACE(8+sizeof(arg->stateid.data));
+ RESERVE_SPACE(8+sizeof(arg->stateid->data));
WRITE32(OP_CLOSE);
- WRITE32(arg->seqid);
- WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+ WRITE32(arg->seqid->sequence->counter);
+ WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
return 0;
}
@@ -729,22 +759,18 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
WRITE64(arg->length);
WRITE32(opargs->new_lock_owner);
if (opargs->new_lock_owner){
- struct nfs_open_to_lock *ol = opargs->u.open_lock;
-
RESERVE_SPACE(40);
- WRITE32(ol->open_seqid);
- WRITEMEM(&ol->open_stateid, sizeof(ol->open_stateid));
- WRITE32(ol->lock_seqid);
- WRITE64(ol->lock_owner.clientid);
+ WRITE32(opargs->open_seqid->sequence->counter);
+ WRITEMEM(opargs->open_stateid->data, sizeof(opargs->open_stateid->data));
+ WRITE32(opargs->lock_seqid->sequence->counter);
+ WRITE64(opargs->lock_owner.clientid);
WRITE32(4);
- WRITE32(ol->lock_owner.id);
+ WRITE32(opargs->lock_owner.id);
}
else {
- struct nfs_exist_lock *el = opargs->u.exist_lock;
-
RESERVE_SPACE(20);
- WRITEMEM(&el->stateid, sizeof(el->stateid));
- WRITE32(el->seqid);
+ WRITEMEM(opargs->lock_stateid->data, sizeof(opargs->lock_stateid->data));
+ WRITE32(opargs->lock_seqid->sequence->counter);
}
return 0;
@@ -775,8 +801,8 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
RESERVE_SPACE(44);
WRITE32(OP_LOCKU);
WRITE32(arg->type);
- WRITE32(opargs->seqid);
- WRITEMEM(&opargs->stateid, sizeof(opargs->stateid));
+ WRITE32(opargs->seqid->sequence->counter);
+ WRITEMEM(opargs->stateid->data, sizeof(opargs->stateid->data));
WRITE64(arg->offset);
WRITE64(arg->length);
@@ -826,7 +852,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
*/
RESERVE_SPACE(8);
WRITE32(OP_OPEN);
- WRITE32(arg->seqid);
+ WRITE32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->open_flags);
RESERVE_SPACE(16);
WRITE64(arg->clientid);
@@ -941,7 +967,7 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_con
RESERVE_SPACE(8+sizeof(arg->stateid.data));
WRITE32(OP_OPEN_CONFIRM);
WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
- WRITE32(arg->seqid);
+ WRITE32(arg->seqid->sequence->counter);
return 0;
}
@@ -950,10 +976,10 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closea
{
uint32_t *p;
- RESERVE_SPACE(8+sizeof(arg->stateid.data));
+ RESERVE_SPACE(8+sizeof(arg->stateid->data));
WRITE32(OP_OPEN_DOWNGRADE);
- WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
- WRITE32(arg->seqid);
+ WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
+ WRITE32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->open_flags);
return 0;
}
@@ -1117,6 +1143,17 @@ static int encode_renew(struct xdr_stream *xdr, const struct nfs4_client *client
}
static int
+encode_restorefh(struct xdr_stream *xdr)
+{
+ uint32_t *p;
+
+ RESERVE_SPACE(4);
+ WRITE32(OP_RESTOREFH);
+
+ return 0;
+}
+
+static int
encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg)
{
uint32_t *p;
@@ -1296,14 +1333,18 @@ static int nfs4_xdr_enc_remove(struct rpc_rqst *req, uint32_t *p, const struct n
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 2,
+ .nops = 3,
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
- if ((status = encode_putfh(&xdr, args->fh)) == 0)
- status = encode_remove(&xdr, args->name);
+ if ((status = encode_putfh(&xdr, args->fh)) != 0)
+ goto out;
+ if ((status = encode_remove(&xdr, args->name)) != 0)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
+out:
return status;
}
@@ -1314,7 +1355,7 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, uint32_t *p, const struct n
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 4,
+ .nops = 7,
};
int status;
@@ -1326,7 +1367,13 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, uint32_t *p, const struct n
goto out;
if ((status = encode_putfh(&xdr, args->new_dir)) != 0)
goto out;
- status = encode_rename(&xdr, args->old_name, args->new_name);
+ if ((status = encode_rename(&xdr, args->old_name, args->new_name)) != 0)
+ goto out;
+ if ((status = encode_getfattr(&xdr, args->bitmask)) != 0)
+ goto out;
+ if ((status = encode_restorefh(&xdr)) != 0)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
@@ -1338,7 +1385,7 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, uint32_t *p, const struct nfs
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 4,
+ .nops = 7,
};
int status;
@@ -1350,7 +1397,13 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, uint32_t *p, const struct nfs
goto out;
if ((status = encode_putfh(&xdr, args->dir_fh)) != 0)
goto out;
- status = encode_link(&xdr, args->name);
+ if ((status = encode_link(&xdr, args->name)) != 0)
+ goto out;
+ if ((status = encode_getfattr(&xdr, args->bitmask)) != 0)
+ goto out;
+ if ((status = encode_restorefh(&xdr)) != 0)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
@@ -1362,7 +1415,7 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, uint32_t *p, const struct n
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 4,
+ .nops = 7,
};
int status;
@@ -1370,10 +1423,16 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, uint32_t *p, const struct n
encode_compound_hdr(&xdr, &hdr);
if ((status = encode_putfh(&xdr, args->dir_fh)) != 0)
goto out;
+ if ((status = encode_savefh(&xdr)) != 0)
+ goto out;
if ((status = encode_create(&xdr, args)) != 0)
goto out;
if ((status = encode_getfh(&xdr)) != 0)
goto out;
+ if ((status = encode_getfattr(&xdr, args->bitmask)) != 0)
+ goto out;
+ if ((status = encode_restorefh(&xdr)) != 0)
+ goto out;
status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
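Note: the extra SAVEFH/GETATTR/RESTOREFH/GETATTR operations added to the REMOVE, RENAME, LINK and CREATE encoders above all perform the same current-filehandle dance, so that a single COMPOUND can return post-op attributes for both the object and its directory. An annotated sketch of the LINK case, using the encode helpers from this file; the wrapper name is a placeholder and the compound header framing is omitted:

/* Sketch only: the filehandle juggling behind nfs4_xdr_enc_link() above. */
static int nfs4_enc_link_shape_sketch(struct xdr_stream *xdr,
				      const struct nfs4_link_arg *args)
{
	int status;

	if ((status = encode_putfh(xdr, args->fh)) != 0)	/* CFH = source file */
		return status;
	if ((status = encode_savefh(xdr)) != 0)			/* stash it in the saved FH */
		return status;
	if ((status = encode_putfh(xdr, args->dir_fh)) != 0)	/* CFH = target directory */
		return status;
	if ((status = encode_link(xdr, args->name)) != 0)	/* LINK leaves CFH = directory */
		return status;
	if ((status = encode_getfattr(xdr, args->bitmask)) != 0) /* directory post-op attrs */
		return status;
	if ((status = encode_restorefh(xdr)) != 0)		/* CFH back to the source file */
		return status;
	return encode_getfattr(xdr, args->bitmask);		/* file post-op attrs */
}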
@@ -1412,7 +1471,7 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, uint32_t *p, struct nfs_clos
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 2,
+ .nops = 3,
};
int status;
@@ -1422,6 +1481,9 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, uint32_t *p, struct nfs_clos
if(status)
goto out;
status = encode_close(&xdr, args);
+ if (status != 0)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
@@ -1433,15 +1495,21 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_opena
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 4,
+ .nops = 7,
};
int status;
+ status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+ if (status != 0)
+ goto out;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
if (status)
goto out;
+ status = encode_savefh(&xdr);
+ if (status)
+ goto out;
status = encode_open(&xdr, args);
if (status)
goto out;
@@ -1449,6 +1517,12 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_opena
if (status)
goto out;
status = encode_getfattr(&xdr, args->bitmask);
+ if (status)
+ goto out;
+ status = encode_restorefh(&xdr);
+ if (status)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
@@ -1464,6 +1538,9 @@ static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, uint32_t *p, struct n
};
int status;
+ status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+ if (status != 0)
+ goto out;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
@@ -1485,6 +1562,9 @@ static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, uint32_t *p, struct nf
};
int status;
+ status = nfs_wait_on_sequence(args->seqid, req->rq_task);
+ if (status != 0)
+ goto out;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
@@ -1502,7 +1582,7 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, uint32_t *p, struct
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 2,
+ .nops = 3,
};
int status;
@@ -1512,6 +1592,9 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, uint32_t *p, struct
if (status)
goto out;
status = encode_open_downgrade(&xdr, args);
+ if (status != 0)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
@@ -1525,8 +1608,15 @@ static int nfs4_xdr_enc_lock(struct rpc_rqst *req, uint32_t *p, struct nfs_locka
struct compound_hdr hdr = {
.nops = 2,
};
+ struct nfs_lock_opargs *opargs = args->u.lock;
int status;
+ status = nfs_wait_on_sequence(opargs->lock_seqid, req->rq_task);
+ if (status != 0)
+ goto out;
+ /* Do we need to do an open_to_lock_owner? */
+ if (opargs->lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)
+ opargs->new_lock_owner = 0;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
@@ -1713,7 +1803,7 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writ
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 2,
+ .nops = 3,
};
int status;
@@ -1723,6 +1813,9 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writ
if (status)
goto out;
status = encode_write(&xdr, args);
+ if (status)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
@@ -1734,7 +1827,7 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, uint32_t *p, struct nfs_wri
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 2,
+ .nops = 3,
};
int status;
@@ -1744,6 +1837,9 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, uint32_t *p, struct nfs_wri
if (status)
goto out;
status = encode_commit(&xdr, args);
+ if (status)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
@@ -2670,8 +2766,7 @@ static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_re
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
- if (status != 0)
- printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
return status;
}
@@ -2704,8 +2799,7 @@ static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
- if (status != 0)
- printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
return status;
}
@@ -2730,8 +2824,7 @@ static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
- if (status != 0)
- printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
return status;
}
@@ -2787,13 +2880,10 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, cons
goto xdr_error;
if ((status = decode_attr_time_modify(xdr, bitmap, &fattr->mtime)) != 0)
goto xdr_error;
- if ((status = verify_attr_len(xdr, savep, attrlen)) == 0) {
+ if ((status = verify_attr_len(xdr, savep, attrlen)) == 0)
fattr->valid = NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4;
- fattr->timestamp = jiffies;
- }
xdr_error:
- if (status != 0)
- printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d\n", __FUNCTION__, -status);
return status;
}
@@ -2826,8 +2916,7 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
- if (status != 0)
- printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
return status;
}
@@ -2890,8 +2979,8 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lockres *res)
status = decode_op_hdr(xdr, OP_LOCK);
if (status == 0) {
- READ_BUF(sizeof(nfs4_stateid));
- COPYMEM(&res->u.stateid, sizeof(res->u.stateid));
+ READ_BUF(sizeof(res->u.stateid.data));
+ COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data));
} else if (status == -NFS4ERR_DENIED)
return decode_lock_denied(xdr, &res->u.denied);
return status;
@@ -2913,8 +3002,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_lockres *res)
status = decode_op_hdr(xdr, OP_LOCKU);
if (status == 0) {
- READ_BUF(sizeof(nfs4_stateid));
- COPYMEM(&res->u.stateid, sizeof(res->u.stateid));
+ READ_BUF(sizeof(res->u.stateid.data));
+ COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data));
}
return status;
}
@@ -2994,7 +3083,7 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
p += bmlen;
return decode_delegation(xdr, res);
xdr_error:
- printk(KERN_NOTICE "%s: xdr error!\n", __FUNCTION__);
+ dprintk("%s: Bitmap too large! Length = %u\n", __FUNCTION__, bmlen);
return -EIO;
}
@@ -3208,6 +3297,12 @@ static int decode_renew(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_RENEW);
}
+static int
+decode_restorefh(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_RESTOREFH);
+}
+
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
size_t *acl_len)
{
@@ -3243,7 +3338,8 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
if (attrlen <= *acl_len)
xdr_read_pages(xdr, attrlen);
*acl_len = attrlen;
- }
+ } else
+ status = -EOPNOTSUPP;
out:
return status;
@@ -3352,6 +3448,9 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, uint32_t *p, stru
if (status)
goto out;
status = decode_open_downgrade(&xdr, res);
+ if (status != 0)
+ goto out;
+ decode_getfattr(&xdr, res->fattr, res->server);
out:
return status;
}
@@ -3424,7 +3523,7 @@ out:
/*
* Decode REMOVE response
*/
-static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_change_info *cinfo)
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_remove_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -3433,8 +3532,11 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
goto out;
- if ((status = decode_putfh(&xdr)) == 0)
- status = decode_remove(&xdr, cinfo);
+ if ((status = decode_putfh(&xdr)) != 0)
+ goto out;
+ if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
+ goto out;
+ decode_getfattr(&xdr, res->dir_attr, res->server);
out:
return status;
}
@@ -3457,7 +3559,14 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
- status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo);
+ if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
+ goto out;
+ /* Current FH is target directory */
+ if (decode_getfattr(&xdr, res->new_fattr, res->server) != 0)
+ goto out;
+ if ((status = decode_restorefh(&xdr)) != 0)
+ goto out;
+ decode_getfattr(&xdr, res->old_fattr, res->server);
out:
return status;
}
@@ -3465,7 +3574,7 @@ out:
/*
* Decode LINK response
*/
-static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_change_info *cinfo)
+static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_link_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -3480,7 +3589,17 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_ch
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
- status = decode_link(&xdr, cinfo);
+ if ((status = decode_link(&xdr, &res->cinfo)) != 0)
+ goto out;
+ /*
+ * Note order: OP_LINK leaves the directory as the current
+ * filehandle.
+ */
+ if (decode_getfattr(&xdr, res->dir_attr, res->server) != 0)
+ goto out;
+ if ((status = decode_restorefh(&xdr)) != 0)
+ goto out;
+ decode_getfattr(&xdr, res->fattr, res->server);
out:
return status;
}
@@ -3499,13 +3618,17 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
+ if ((status = decode_savefh(&xdr)) != 0)
+ goto out;
if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0)
goto out;
if ((status = decode_getfh(&xdr, res->fh)) != 0)
goto out;
- status = decode_getfattr(&xdr, res->fattr, res->server);
- if (status == NFS4ERR_DELAY)
- status = 0;
+ if (decode_getfattr(&xdr, res->fattr, res->server) != 0)
+ goto out;
+ if ((status = decode_restorefh(&xdr)) != 0)
+ goto out;
+ decode_getfattr(&xdr, res->dir_fattr, res->server);
out:
return status;
}
@@ -3623,6 +3746,15 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_cl
if (status)
goto out;
status = decode_close(&xdr, res);
+ if (status != 0)
+ goto out;
+ /*
+ * Note: Server may do delete on close for this file
+ * in which case the getattr call will fail with
+ * an ESTALE error. Shouldn't be a problem,
+ * though, since fattr->valid will remain unset.
+ */
+ decode_getfattr(&xdr, res->fattr, res->server);
out:
return status;
}
@@ -3643,15 +3775,20 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_ope
status = decode_putfh(&xdr);
if (status)
goto out;
+ status = decode_savefh(&xdr);
+ if (status)
+ goto out;
status = decode_open(&xdr, res);
if (status)
goto out;
status = decode_getfh(&xdr, &res->fh);
if (status)
goto out;
- status = decode_getfattr(&xdr, res->f_attr, res->server);
- if (status == NFS4ERR_DELAY)
- status = 0;
+ if (decode_getfattr(&xdr, res->f_attr, res->server) != 0)
+ goto out;
+ if ((status = decode_restorefh(&xdr)) != 0)
+ goto out;
+ decode_getfattr(&xdr, res->dir_attr, res->server);
out:
return status;
}
@@ -3869,6 +4006,9 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_wr
if (status)
goto out;
status = decode_write(&xdr, res);
+ if (status)
+ goto out;
+ decode_getfattr(&xdr, res->fattr, res->server);
if (!status)
status = res->count;
out:
@@ -3892,6 +4032,9 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_w
if (status)
goto out;
status = decode_commit(&xdr, res);
+ if (status)
+ goto out;
+ decode_getfattr(&xdr, res->fattr, res->server);
out:
return status;
}
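
The nfs4xdr.c hunks above all apply one pattern: each COMPOUND grows a trailing GETATTR (and, where the original filehandle has to survive, a SAVEFH/RESTOREFH pair), with hdr.nops bumped to match, so the reply carries fresh attributes in the same round trip. A minimal sketch of the encode side as the WRITE hunk implies it reads after the change; the unchanged lines (xdr_init_encode, encode_compound_hdr, encode_putfh) are reconstructed from surrounding context rather than shown in the hunk:

    static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p,
                                  struct nfs_writeargs *args)
    {
            struct xdr_stream xdr;
            struct compound_hdr hdr = {
                    .nops = 3,      /* PUTFH + WRITE + GETATTR */
            };
            int status;

            xdr_init_encode(&xdr, &req->rq_snd_buf, p);
            encode_compound_hdr(&xdr, &hdr);
            status = encode_putfh(&xdr, args->fh);
            if (status)
                    goto out;
            status = encode_write(&xdr, args);
            if (status)
                    goto out;
            /* Post-op GETATTR: the server returns attributes with the WRITE */
            status = encode_getfattr(&xdr, args->bitmask);
    out:
            return status;
    }
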
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index be23c3fb926..a48a003242c 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -61,7 +61,7 @@ nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("%s: call getattr\n", __FUNCTION__);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call(server->client_sys, NFSPROC_GETATTR, fhandle, fattr, 0);
dprintk("%s: reply getattr: %d\n", __FUNCTION__, status);
if (status)
@@ -93,7 +93,7 @@ nfs_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("NFS call getattr\n");
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call(server->client, NFSPROC_GETATTR,
fhandle, fattr, 0);
dprintk("NFS reply getattr: %d\n", status);
@@ -112,7 +112,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
int status;
dprintk("NFS call setattr\n");
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call(NFS_CLIENT(inode), NFSPROC_SETATTR, &arg, fattr, 0);
if (status == 0)
nfs_setattr_update_inode(inode, sattr);
@@ -136,7 +136,7 @@ nfs_proc_lookup(struct inode *dir, struct qstr *name,
int status;
dprintk("NFS call lookup %s\n", name->name);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call(NFS_CLIENT(dir), NFSPROC_LOOKUP, &arg, &res, 0);
dprintk("NFS reply lookup: %d\n", status);
return status;
@@ -174,7 +174,7 @@ static int nfs_proc_read(struct nfs_read_data *rdata)
dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
(long long) rdata->args.offset);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
if (status >= 0) {
nfs_refresh_inode(inode, fattr);
@@ -203,10 +203,10 @@ static int nfs_proc_write(struct nfs_write_data *wdata)
dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
(long long) wdata->args.offset);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
if (status >= 0) {
- nfs_refresh_inode(inode, fattr);
+ nfs_post_op_update_inode(inode, fattr);
wdata->res.count = wdata->args.count;
wdata->verf.committed = NFS_FILE_SYNC;
}
@@ -216,7 +216,7 @@ static int nfs_proc_write(struct nfs_write_data *wdata)
static int
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags)
+ int flags, struct nameidata *nd)
{
struct nfs_fh fhandle;
struct nfs_fattr fattr;
@@ -232,7 +232,7 @@ nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
};
int status;
- fattr.valid = 0;
+ nfs_fattr_init(&fattr);
dprintk("NFS call create %s\n", dentry->d_name.name);
status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
if (status == 0)
@@ -273,12 +273,13 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
sattr->ia_size = new_encode_dev(rdev);/* get out your barf bag */
}
- fattr.valid = 0;
+ nfs_fattr_init(&fattr);
status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
+ nfs_mark_for_revalidate(dir);
if (status == -EINVAL && S_ISFIFO(mode)) {
sattr->ia_mode = mode;
- fattr.valid = 0;
+ nfs_fattr_init(&fattr);
status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
}
if (status == 0)
@@ -305,6 +306,7 @@ nfs_proc_remove(struct inode *dir, struct qstr *name)
dprintk("NFS call remove %s\n", name->name);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_mark_for_revalidate(dir);
dprintk("NFS reply remove: %d\n", status);
return status;
@@ -331,8 +333,10 @@ nfs_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
{
struct rpc_message *msg = &task->tk_msg;
- if (msg->rpc_argp)
+ if (msg->rpc_argp) {
+ nfs_mark_for_revalidate(dir->d_inode);
kfree(msg->rpc_argp);
+ }
return 0;
}
@@ -352,6 +356,8 @@ nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
status = rpc_call(NFS_CLIENT(old_dir), NFSPROC_RENAME, &arg, NULL, 0);
+ nfs_mark_for_revalidate(old_dir);
+ nfs_mark_for_revalidate(new_dir);
dprintk("NFS reply rename: %d\n", status);
return status;
}
@@ -369,6 +375,7 @@ nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
dprintk("NFS call link %s\n", name->name);
status = rpc_call(NFS_CLIENT(inode), NFSPROC_LINK, &arg, NULL, 0);
+ nfs_mark_for_revalidate(dir);
dprintk("NFS reply link: %d\n", status);
return status;
}
@@ -391,9 +398,10 @@ nfs_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
if (path->len > NFS2_MAXPATHLEN)
return -ENAMETOOLONG;
dprintk("NFS call symlink %s -> %s\n", name->name, path->name);
- fattr->valid = 0;
+ nfs_fattr_init(fattr);
fhandle->size = 0;
status = rpc_call(NFS_CLIENT(dir), NFSPROC_SYMLINK, &arg, NULL, 0);
+ nfs_mark_for_revalidate(dir);
dprintk("NFS reply symlink: %d\n", status);
return status;
}
@@ -416,8 +424,9 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
int status;
dprintk("NFS call mkdir %s\n", dentry->d_name.name);
- fattr.valid = 0;
+ nfs_fattr_init(&fattr);
status = rpc_call(NFS_CLIENT(dir), NFSPROC_MKDIR, &arg, &res, 0);
+ nfs_mark_for_revalidate(dir);
if (status == 0)
status = nfs_instantiate(dentry, &fhandle, &fattr);
dprintk("NFS reply mkdir: %d\n", status);
@@ -436,6 +445,7 @@ nfs_proc_rmdir(struct inode *dir, struct qstr *name)
dprintk("NFS call rmdir %s\n", name->name);
status = rpc_call(NFS_CLIENT(dir), NFSPROC_RMDIR, &arg, NULL, 0);
+ nfs_mark_for_revalidate(dir);
dprintk("NFS reply rmdir: %d\n", status);
return status;
}
@@ -484,7 +494,7 @@ nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("NFS call statfs\n");
- stat->fattr->valid = 0;
+ nfs_fattr_init(stat->fattr);
status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
dprintk("NFS reply statfs: %d\n", status);
if (status)
@@ -507,7 +517,7 @@ nfs_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
int status;
dprintk("NFS call fsinfo\n");
- info->fattr->valid = 0;
+ nfs_fattr_init(info->fattr);
status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
dprintk("NFS reply fsinfo: %d\n", status);
if (status)
@@ -579,7 +589,7 @@ nfs_write_done(struct rpc_task *task)
struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
if (task->tk_status >= 0)
- nfs_refresh_inode(data->inode, data->res.fattr);
+ nfs_post_op_update_inode(data->inode, data->res.fattr);
nfs_writeback_done(task);
}
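
The proc.c changes follow two rules: every attribute structure is reset with nfs_fattr_init() before the RPC instead of open-coding fattr->valid = 0, and any call that changes a directory marks that directory for revalidation afterwards, while data writes move from nfs_refresh_inode() to nfs_post_op_update_inode(). A sketch of the resulting shape of a directory-modifying call; the argument structure fields are taken from the unchanged parts of the file:

    static int nfs_proc_rmdir(struct inode *dir, struct qstr *name)
    {
            struct nfs_diropargs arg = {
                    .fh   = NFS_FH(dir),
                    .name = name->name,
                    .len  = name->len,
            };
            int status;

            dprintk("NFS call  rmdir %s\n", name->name);
            status = rpc_call(NFS_CLIENT(dir), NFSPROC_RMDIR, &arg, NULL, 0);
            /* The directory changed on the server; revalidate before reuse */
            nfs_mark_for_revalidate(dir);
            dprintk("NFS reply rmdir: %d\n", status);
            return status;
    }
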
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 9758ebd4990..43b03b19731 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -215,6 +215,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
data->res.fattr = &data->fattr;
data->res.count = count;
data->res.eof = 0;
+ nfs_fattr_init(&data->fattr);
NFS_PROTO(inode)->read_setup(data);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5130eda231d..819a65f5071 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -870,6 +870,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
data->res.fattr = &data->fattr;
data->res.count = count;
data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
NFS_PROTO(inode)->write_setup(data, how);
@@ -1237,6 +1238,7 @@ static void nfs_commit_rpcsetup(struct list_head *head,
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
NFS_PROTO(inode)->commit_setup(data, how);
diff --git a/fs/open.c b/fs/open.c
index f0d90cf0495..8d06ec911fd 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -739,7 +739,8 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group)
}
static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
- int flags, struct file *f)
+ int flags, struct file *f,
+ int (*open)(struct inode *, struct file *))
{
struct inode *inode;
int error;
@@ -761,11 +762,14 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
f->f_op = fops_get(inode->i_fop);
file_move(f, &inode->i_sb->s_files);
- if (f->f_op && f->f_op->open) {
- error = f->f_op->open(inode,f);
+ if (!open && f->f_op)
+ open = f->f_op->open;
+ if (open) {
+ error = open(inode, f);
if (error)
goto cleanup_all;
}
+
f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
@@ -814,28 +818,75 @@ struct file *filp_open(const char * filename, int flags, int mode)
{
int namei_flags, error;
struct nameidata nd;
- struct file *f;
namei_flags = flags;
if ((namei_flags+1) & O_ACCMODE)
namei_flags++;
- if (namei_flags & O_TRUNC)
- namei_flags |= 2;
-
- error = -ENFILE;
- f = get_empty_filp();
- if (f == NULL)
- return ERR_PTR(error);
error = open_namei(filename, namei_flags, mode, &nd);
if (!error)
- return __dentry_open(nd.dentry, nd.mnt, flags, f);
+ return nameidata_to_filp(&nd, flags);
- put_filp(f);
return ERR_PTR(error);
}
EXPORT_SYMBOL(filp_open);
+/**
+ * lookup_instantiate_filp - instantiates the open intent filp
+ * @nd: pointer to nameidata
+ * @dentry: pointer to dentry
+ * @open: open callback
+ *
+ * Helper for filesystems that want to use lookup open intents and pass back
+ * a fully instantiated struct file to the caller.
+ * This function is meant to be called from within a filesystem's
+ * lookup method.
+ * Note that in case of error, nd->intent.open.file is destroyed, but the
+ * path information remains valid.
+ * If the open callback is set to NULL, then the standard f_op->open()
+ * filesystem callback is substituted.
+ */
+struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+ int (*open)(struct inode *, struct file *))
+{
+ if (IS_ERR(nd->intent.open.file))
+ goto out;
+ if (IS_ERR(dentry))
+ goto out_err;
+ nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->mnt),
+ nd->intent.open.flags - 1,
+ nd->intent.open.file,
+ open);
+out:
+ return nd->intent.open.file;
+out_err:
+ release_open_intent(nd);
+ nd->intent.open.file = (struct file *)dentry;
+ goto out;
+}
+EXPORT_SYMBOL_GPL(lookup_instantiate_filp);
+
+/**
+ * nameidata_to_filp - convert a nameidata to an open filp.
+ * @nd: pointer to nameidata
+ * @flags: open flags
+ *
+ * Note that this function destroys the original nameidata
+ */
+struct file *nameidata_to_filp(struct nameidata *nd, int flags)
+{
+ struct file *filp;
+
+ /* Pick up the filp from the open intent */
+ filp = nd->intent.open.file;
+ /* Has the filesystem initialised the file for us? */
+ if (filp->f_dentry == NULL)
+ filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL);
+ else
+ path_release(nd);
+ return filp;
+}
+
struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
{
int error;
@@ -846,7 +897,7 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
if (f == NULL)
return ERR_PTR(error);
- return __dentry_open(dentry, mnt, flags, f);
+ return __dentry_open(dentry, mnt, flags, f, NULL);
}
EXPORT_SYMBOL(dentry_open);
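
The open.c rework splits file opening into two halves: filp_open() now does open_namei() followed by nameidata_to_filp(), and __dentry_open() accepts an explicit open callback so a filesystem's ->lookup() can hand back an already-opened file through lookup_instantiate_filp(). A hedged sketch of how a filesystem might wire this up from inside its lookup method; my_fs_open_file, the LOOKUP_OPEN check, and the error handling are illustrative and not taken from this patch:

    /* Hypothetical per-filesystem open callback */
    static int my_fs_open_file(struct inode *inode, struct file *filp)
    {
            /* filesystem-specific open work would go here */
            return 0;
    }

    static struct dentry *my_fs_lookup(struct inode *dir, struct dentry *dentry,
                                       struct nameidata *nd)
    {
            /* ... ordinary lookup work, instantiating the dentry ... */
            if (nd && (nd->flags & LOOKUP_OPEN)) {
                    struct file *filp;

                    /* On error the intent file is released but the path in
                     * nd stays valid, per the kerneldoc above. */
                    filp = lookup_instantiate_filp(nd, dentry, my_fs_open_file);
                    if (IS_ERR(filp))
                            return ERR_PTR(PTR_ERR(filp));
            }
            return NULL;
    }

Ordinary opens are unaffected: nameidata_to_filp() sees that f_dentry is still NULL and falls back to __dentry_open() with a NULL callback, which then uses f_op->open as before.
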
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 77e178f1316..1e848648a32 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -430,7 +430,7 @@ void del_gendisk(struct gendisk *disk)
disk->flags &= ~GENHD_FL_UP;
unlink_gendisk(disk);
disk_stat_set_all(disk, 0);
- disk->stamp = disk->stamp_idle = 0;
+ disk->stamp = 0;
devfs_remove_disk(disk);
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 2706e2adffa..45829889dcd 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -2022,7 +2022,7 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h)
}
#ifdef CONFIG_REISERFS_CHECK
-void *reiserfs_kmalloc(size_t size, int flags, struct super_block *s)
+void *reiserfs_kmalloc(size_t size, gfp_t flags, struct super_block *s)
{
void *vp;
static size_t malloced;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index d76ee6c4f9b..5f82352b97e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2842,7 +2842,7 @@ static int reiserfs_set_page_dirty(struct page *page)
* even in -o notail mode, we can't be sure an old mount without -o notail
* didn't create files with tails.
*/
-static int reiserfs_releasepage(struct page *page, int unused_gfp_flags)
+static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
struct inode *inode = page->mapping->host;
struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 87ac9dc8b38..72e12079867 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -453,7 +453,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
struct page *page;
/* We can deadlock if we try to free dentries,
and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
- mapping->flags = (mapping->flags & ~__GFP_BITS_MASK) | GFP_NOFS;
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
page = read_cache_page(mapping, n,
(filler_t *) mapping->a_ops->readpage, NULL);
if (!IS_ERR(page)) {
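
The xattr.c hunk swaps direct manipulation of mapping->flags for the mapping_set_gfp_mask() helper; the underlying point is unchanged: page-cache reads issued from inside the filesystem must use GFP_NOFS so that reclaim triggered by the read cannot recurse back into the filesystem (for example by freeing dentries in the middle of an unlink). A minimal fragment of the pattern, with dir and n taken from the surrounding reiserfs_get_page():

    struct address_space *mapping = dir->i_mapping;
    struct page *page;

    /* Reclaim triggered below must not re-enter the filesystem */
    mapping_set_gfp_mask(mapping, GFP_NOFS);
    page = read_cache_page(mapping, n,
                           (filler_t *)mapping->a_ops->readpage, NULL);
    if (IS_ERR(page))
            return page;
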
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index d2653b589b1..3c92162dc72 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -45,11 +45,11 @@
void *
-kmem_alloc(size_t size, gfp_t flags)
+kmem_alloc(size_t size, unsigned int __nocast flags)
{
- int retries = 0;
- unsigned int lflags = kmem_flags_convert(flags);
- void *ptr;
+ int retries = 0;
+ gfp_t lflags = kmem_flags_convert(flags);
+ void *ptr;
do {
if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
@@ -67,7 +67,7 @@ kmem_alloc(size_t size, gfp_t flags)
}
void *
-kmem_zalloc(size_t size, gfp_t flags)
+kmem_zalloc(size_t size, unsigned int __nocast flags)
{
void *ptr;
@@ -90,7 +90,7 @@ kmem_free(void *ptr, size_t size)
void *
kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
- gfp_t flags)
+ unsigned int __nocast flags)
{
void *new;
@@ -105,11 +105,11 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
}
void *
-kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags)
+kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
{
- int retries = 0;
- unsigned int lflags = kmem_flags_convert(flags);
- void *ptr;
+ int retries = 0;
+ gfp_t lflags = kmem_flags_convert(flags);
+ void *ptr;
do {
ptr = kmem_cache_alloc(zone, lflags);
@@ -124,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags)
}
void *
-kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags)
+kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
{
void *ptr;
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index ee7010f085b..f4bb78c268c 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -81,9 +81,9 @@ typedef unsigned long xfs_pflags_t;
*(NSTATEP) = *(OSTATEP); \
} while (0)
-static __inline unsigned int kmem_flags_convert(gfp_t flags)
+static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags)
{
- unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
+ gfp_t lflags = __GFP_NOWARN; /* we'll report problems, if need be */
#ifdef DEBUG
if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
@@ -125,16 +125,16 @@ kmem_zone_destroy(kmem_zone_t *zone)
BUG();
}
-extern void *kmem_zone_zalloc(kmem_zone_t *, gfp_t);
-extern void *kmem_zone_alloc(kmem_zone_t *, gfp_t);
+extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_alloc(size_t, gfp_t);
-extern void *kmem_realloc(void *, size_t, size_t, gfp_t);
-extern void *kmem_zalloc(size_t, gfp_t);
+extern void *kmem_alloc(size_t, unsigned int __nocast);
+extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
+extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void kmem_free(void *, size_t);
typedef struct shrinker *kmem_shaker_t;
-typedef int (*kmem_shake_func_t)(int, unsigned int);
+typedef int (*kmem_shake_func_t)(int, gfp_t);
static __inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
@@ -149,7 +149,7 @@ kmem_shake_deregister(kmem_shaker_t shrinker)
}
static __inline int
-kmem_shake_allow(unsigned int gfp_mask)
+kmem_shake_allow(gfp_t gfp_mask)
{
return (gfp_mask & __GFP_WAIT);
}
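
The XFS hunks move the gfp_t annotation inward: callers keep passing XFS's own KM_* flags as an unsigned int __nocast, and only the value produced by kmem_flags_convert() (lflags) carries the gfp_t type before it reaches kmalloc/vmalloc/kmem_cache_alloc. A hedged usage sketch; xfs_demo_item_t and the wrapper functions are made up for the example:

    typedef struct { int dummy; } xfs_demo_item_t;   /* illustrative */

    static xfs_demo_item_t *xfs_demo_alloc(void)
    {
            /* KM_SLEEP may block; kmem_flags_convert() turns it into the
             * matching gfp_t internally, so the caller never sees gfp_t. */
            return kmem_zalloc(sizeof(xfs_demo_item_t), KM_SLEEP);
    }

    static void xfs_demo_free(xfs_demo_item_t *item)
    {
            kmem_free(item, sizeof(*item));
    }
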
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c6c077978fe..7aa39872470 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1296,7 +1296,7 @@ linvfs_invalidate_page(
STATIC int
linvfs_release_page(
struct page *page,
- int gfp_mask)
+ gfp_t gfp_mask)
{
struct inode *inode = page->mapping->host;
int dirty, delalloc, unmapped, unwritten;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index e82cf72ac59..ba4767c04ad 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -64,7 +64,7 @@
STATIC kmem_cache_t *pagebuf_zone;
STATIC kmem_shaker_t pagebuf_shake;
-STATIC int xfsbufd_wakeup(int, unsigned int);
+STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *xfslogd_workqueue;
@@ -383,7 +383,7 @@ _pagebuf_lookup_pages(
size_t blocksize = bp->pb_target->pbr_bsize;
size_t size = bp->pb_count_desired;
size_t nbytes, offset;
- int gfp_mask = pb_to_gfp(flags);
+ gfp_t gfp_mask = pb_to_gfp(flags);
unsigned short page_count, i;
pgoff_t first;
loff_t end;
@@ -1749,8 +1749,8 @@ STATIC int xfsbufd_force_sleep;
STATIC int
xfsbufd_wakeup(
- int priority,
- unsigned int mask)
+ int priority,
+ gfp_t mask)
{
if (xfsbufd_force_sleep)
return 0;
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 1b383e3cb68..20ac3d95ecd 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -1,6 +1,8 @@
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H
+#include <asm/barrier.h>
+
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc...
@@ -100,18 +102,19 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
long temp, result;
+ smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%1\n"
" addl %0,%3,%2\n"
" addl %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
- " mb\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (v->counter), "=&r" (result)
:"Ir" (i), "m" (v->counter) : "memory");
+ smp_mb();
return result;
}
@@ -120,54 +123,57 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
long temp, result;
+ smp_mb();
__asm__ __volatile__(
"1: ldq_l %0,%1\n"
" addq %0,%3,%2\n"
" addq %0,%3,%0\n"
" stq_c %0,%1\n"
" beq %0,2f\n"
- " mb\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (v->counter), "=&r" (result)
:"Ir" (i), "m" (v->counter) : "memory");
+ smp_mb();
return result;
}
static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
long temp, result;
+ smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%1\n"
" subl %0,%3,%2\n"
" subl %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
- " mb\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (v->counter), "=&r" (result)
:"Ir" (i), "m" (v->counter) : "memory");
+ smp_mb();
return result;
}
static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
long temp, result;
+ smp_mb();
__asm__ __volatile__(
"1: ldq_l %0,%1\n"
" subq %0,%3,%2\n"
" subq %0,%3,%0\n"
" stq_c %0,%1\n"
" beq %0,2f\n"
- " mb\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (v->counter), "=&r" (result)
:"Ir" (i), "m" (v->counter) : "memory");
+ smp_mb();
return result;
}
diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h
new file mode 100644
index 00000000000..229c83fe77c
--- /dev/null
+++ b/include/asm-alpha/barrier.h
@@ -0,0 +1,34 @@
+#ifndef __BARRIER_H
+#define __BARRIER_H
+
+#define mb() \
+__asm__ __volatile__("mb": : :"memory")
+
+#define rmb() \
+__asm__ __volatile__("mb": : :"memory")
+
+#define wmb() \
+__asm__ __volatile__("wmb": : :"memory")
+
+#define read_barrier_depends() \
+__asm__ __volatile__("mb": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() barrier()
+#endif
+
+#define set_mb(var, value) \
+do { var = value; mb(); } while (0)
+
+#define set_wmb(var, value) \
+do { var = value; wmb(); } while (0)
+
+#endif /* __BARRIER_H */
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index c675f282d6a..680f7ecbb28 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -31,7 +31,7 @@
#else /* no PCI - no IOMMU. */
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int gfp);
+ dma_addr_t *dma_handle, gfp_t gfp);
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
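
Nearly every include/asm-*/dma-mapping.h hunk in this merge is the same mechanical change: the allocation-flags parameter of dma_alloc_coherent() and its relatives becomes gfp_t so sparse can type-check it. Existing callers already pass GFP_* constants and need no source change; a typical caller looks like this sketch, where dev, BUF_SIZE and the wrapper functions are illustrative:

    #define BUF_SIZE 4096                    /* illustrative */

    static void *buf;
    static dma_addr_t buf_dma;

    static int demo_dma_setup(struct device *dev)
    {
            /* last argument is now typed gfp_t instead of int/unsigned int */
            buf = dma_alloc_coherent(dev, BUF_SIZE, &buf_dma, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            return 0;
    }

    static void demo_dma_teardown(struct device *dev)
    {
            dma_free_coherent(dev, BUF_SIZE, buf, buf_dma);
    }
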
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index bdb4d66418f..050e86d1289 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -4,6 +4,7 @@
#include <linux/config.h>
#include <asm/pal.h>
#include <asm/page.h>
+#include <asm/barrier.h>
/*
* System defines.. Note that this is included both from .c and .S
@@ -139,36 +140,6 @@ extern void halt(void) __attribute__((noreturn));
struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
-
-#define set_wmb(var, value) \
-do { var = value; wmb(); } while (0)
-
#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
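
Two related things happen in the alpha hunks: the memory-barrier macros move out of <asm/system.h> into the new <asm/barrier.h> (now included by both system.h and atomic.h), and the value-returning atomics issue smp_mb() before and after the ll/sc loop instead of a single trailing mb instruction, making them full barriers on SMP while the smp_* variants collapse to plain compiler barriers on UP. A hedged sketch of what that ordering guarantees a caller; payload and refs are illustrative:

    #include <asm/atomic.h>          /* now pulls in <asm/barrier.h> itself */

    static atomic_t refs = ATOMIC_INIT(0);
    static int payload;              /* illustrative shared data */

    static void demo_publish(void)
    {
            payload = 42;
            /* Full barrier on SMP: the store to payload above cannot be
             * reordered past the counter update, and later accesses cannot
             * be hoisted above it. */
            atomic_add_return(1, &refs);
    }
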
diff --git a/include/asm-arm/arch-s3c2410/regs-clock.h b/include/asm-arm/arch-s3c2410/regs-clock.h
index 16f4c3cc138..34360706e01 100644
--- a/include/asm-arm/arch-s3c2410/regs-clock.h
+++ b/include/asm-arm/arch-s3c2410/regs-clock.h
@@ -18,7 +18,9 @@
* 10-Feb-2005 Ben Dooks Fixed CAMDIVN address (Guillaume Gourat)
* 10-Mar-2005 Lucas Villa Real Changed S3C2410_VA to S3C24XX_VA
* 27-Aug-2005 Ben Dooks Add clock-slow info
- */
+ * 20-Oct-2005 Ben Dooks Fixed overflow in PLL (Guillaume Gourat)
+ * 20-Oct-2005 Ben Dooks Add masks for DCLK (Guillaume Gourat)
+*/
#ifndef __ASM_ARM_REGS_CLOCK
#define __ASM_ARM_REGS_CLOCK "$Id: clock.h,v 1.4 2003/04/30 14:50:51 ben Exp $"
@@ -66,11 +68,16 @@
#define S3C2410_DCLKCON_DCLK0_UCLK (1<<1)
#define S3C2410_DCLKCON_DCLK0_DIV(x) (((x) - 1 )<<4)
#define S3C2410_DCLKCON_DCLK0_CMP(x) (((x) - 1 )<<8)
+#define S3C2410_DCLKCON_DCLK0_DIV_MASK ((0xf)<<4)
+#define S3C2410_DCLKCON_DCLK0_CMP_MASK ((0xf)<<8)
#define S3C2410_DCLKCON_DCLK1EN (1<<16)
#define S3C2410_DCLKCON_DCLK1_PCLK (0<<17)
#define S3C2410_DCLKCON_DCLK1_UCLK (1<<17)
#define S3C2410_DCLKCON_DCLK1_DIV(x) (((x) - 1) <<20)
+#define S3C2410_DCLKCON_DCLK1_CMP(x) (((x) - 1) <<24)
+#define S3C2410_DCLKCON_DCLK1_DIV_MASK ((0xf) <<20)
+#define S3C2410_DCLKCON_DCLK1_CMP_MASK ((0xf) <<24)
#define S3C2410_CLKDIVN_PDIVN (1<<0)
#define S3C2410_CLKDIVN_HDIVN (1<<1)
@@ -83,10 +90,13 @@
#ifndef __ASSEMBLY__
+#include <asm/div64.h>
+
static inline unsigned int
-s3c2410_get_pll(int pllval, int baseclk)
+s3c2410_get_pll(unsigned int pllval, unsigned int baseclk)
{
- int mdiv, pdiv, sdiv;
+ unsigned int mdiv, pdiv, sdiv;
+ uint64_t fvco;
mdiv = pllval >> S3C2410_PLLCON_MDIVSHIFT;
pdiv = pllval >> S3C2410_PLLCON_PDIVSHIFT;
@@ -96,7 +106,10 @@ s3c2410_get_pll(int pllval, int baseclk)
pdiv &= S3C2410_PLLCON_PDIVMASK;
sdiv &= S3C2410_PLLCON_SDIVMASK;
- return (baseclk * (mdiv + 8)) / ((pdiv + 2) << sdiv);
+ fvco = (uint64_t)baseclk * (mdiv + 8);
+ do_div(fvco, (pdiv + 2) << sdiv);
+
+ return (unsigned int)fvco;
}
#endif /* __ASSEMBLY__ */
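
The s3c2410_get_pll() fix is the usual cure for 32-bit overflow: widen the multiply to 64 bits and let do_div(), which divides a uint64_t in place and returns the remainder, bring the result back down. The same arithmetic as a standalone helper, with the PLLCON register masking assumed to be done by the caller:

    #include <asm/div64.h>

    static unsigned int pll_rate(unsigned int baseclk, unsigned int mdiv,
                                 unsigned int pdiv, unsigned int sdiv)
    {
            uint64_t fvco;

            /* baseclk * (mdiv + 8) can exceed 32 bits for large mdiv,
             * so multiply in 64 bits and divide with do_div(). */
            fvco = (uint64_t)baseclk * (mdiv + 8);
            do_div(fvco, (pdiv + 2) << sdiv);

            return (unsigned int)fvco;
    }
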
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index aad7aad026b..e007dd990da 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -347,7 +347,6 @@ static inline unsigned long __ffs(unsigned long word)
* the clz instruction for much better code efficiency.
*/
-static __inline__ int generic_fls(int x);
#define fls(x) \
( __builtin_constant_p(x) ? generic_fls(x) : \
({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index d62ade4e4cb..e3e8541ee63 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -70,7 +70,7 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
* device-viewed address.
*/
extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
/**
* dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -117,7 +117,7 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
* device-viewed address.
*/
extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp);
+dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index 0b5c3fdaefe..8eff51349ae 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -15,14 +15,14 @@
#ifdef CONFIG_PCI
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
#else
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ gfp_t flag)
{
BUG();
return NULL;
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index 0206ab35eae..5003e017fd1 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -13,7 +13,7 @@
extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp);
+void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
/*
diff --git a/include/asm-frv/pci.h b/include/asm-frv/pci.h
index b4efe5e3591..1168451c275 100644
--- a/include/asm-frv/pci.h
+++ b/include/asm-frv/pci.h
@@ -32,7 +32,7 @@ extern void pcibios_set_master(struct pci_dev *dev);
extern void pcibios_penalize_isa_irq(int irq);
#ifdef CONFIG_MMU
-extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
+extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
extern void consistent_free(void *vaddr);
extern void consistent_sync(void *vaddr, size_t size, int direction);
extern void consistent_sync_page(struct page *page, unsigned long offset,
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index fd9de9502df..a7f1a55ce6b 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -6,7 +6,7 @@
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ gfp_t flag)
{
BUG();
return NULL;
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index 79e89a7db56..a2f6ac5aef7 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -37,7 +37,7 @@ typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
+typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
diff --git a/include/asm-m32r/dma-mapping.h b/include/asm-m32r/dma-mapping.h
index 3a2db28834b..a7fa0302bda 100644
--- a/include/asm-m32r/dma-mapping.h
+++ b/include/asm-m32r/dma-mapping.h
@@ -8,7 +8,7 @@
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ gfp_t flag)
{
return (void *)NULL;
}
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index af28dc88930..43288634c38 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -5,13 +5,13 @@
#include <asm/cache.h>
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, gfp_t flag);
void dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
diff --git a/include/asm-mips/sgi/hpc3.h b/include/asm-mips/sgi/hpc3.h
index ac3dfc7af5b..fcec52bafb2 100644
--- a/include/asm-mips/sgi/hpc3.h
+++ b/include/asm-mips/sgi/hpc3.h
@@ -128,26 +128,26 @@ struct hpc3_ethregs {
volatile u32 rx_gfptr; /* current GIO fifo ptr */
volatile u32 rx_dfptr; /* current device fifo ptr */
u32 _unused1; /* padding */
- volatile u32 rx_reset; /* reset register */
-#define HPC3_ERXRST_CRESET 0x1 /* Reset dma channel and external controller */
-#define HPC3_ERXRST_CLRIRQ 0x2 /* Clear channel interrupt */
-#define HPC3_ERXRST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */
-
- volatile u32 rx_dconfig; /* DMA configuration register */
-#define HPC3_ERXDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */
-#define HPC3_ERXDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */
-#define HPC3_ERXDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */
-#define HPC3_ERXDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */
-#define HPC3_ERXDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */
-#define HPC3_ERXDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */
-#define HPC3_ERXDCFG_FIRQ 0x08000 /* Another bad packet timeout enable */
-#define HPC3_ERXDCFG_PTO 0x30000 /* Programmed timeout value for above two */
-
- volatile u32 rx_pconfig; /* PIO configuration register */
-#define HPC3_ERXPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
-#define HPC3_ERXPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
-#define HPC3_ERXPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
-#define HPC3_ERXPCFG_TST 0x1000 /* Diagnistic ram test feature bit */
+ volatile u32 reset; /* reset register */
+#define HPC3_ERST_CRESET 0x1 /* Reset dma channel and external controller */
+#define HPC3_ERST_CLRIRQ 0x2 /* Clear channel interrupt */
+#define HPC3_ERST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */
+
+ volatile u32 dconfig; /* DMA configuration register */
+#define HPC3_EDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */
+#define HPC3_EDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */
+#define HPC3_EDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */
+#define HPC3_EDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */
+#define HPC3_EDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */
+#define HPC3_EDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */
+#define HPC3_EDCFG_FIRQ 0x08000 /* Another bad packet timeout enable */
+#define HPC3_EDCFG_PTO 0x30000 /* Programmed timeout value for above two */
+
+ volatile u32 pconfig; /* PIO configuration register */
+#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
+#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
+#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
+#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */
u32 _unused2[0x1000/4 - 8]; /* padding */
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index 4db84f969e9..74d4ac6f215 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -9,8 +9,8 @@
/* See Documentation/DMA-mapping.txt */
struct hppa_dma_ops {
int (*dma_supported)(struct device *dev, u64 mask);
- void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, int flag);
- void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, int flag);
+ void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
+ void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
@@ -49,14 +49,14 @@ extern struct hppa_dma_ops *hppa_dma_ops;
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ gfp_t flag)
{
return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
}
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ gfp_t flag)
{
return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
}
diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-ppc/dma-mapping.h
index 061bfcac1bf..6e963511443 100644
--- a/include/asm-ppc/dma-mapping.h
+++ b/include/asm-ppc/dma-mapping.h
@@ -19,7 +19,7 @@
* allocate the space "normally" and use the cache management functions
* to ensure it is consistent.
*/
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp);
+extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 80d164c1529..d3fa5c2b889 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -9,7 +9,7 @@
extern struct bus_type pci_bus_type;
/* arch/sh/mm/consistent.c */
-extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
+extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);
@@ -26,7 +26,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
if (sh_mv.mv_consistent_alloc) {
void *ret;
diff --git a/include/asm-sh/machvec.h b/include/asm-sh/machvec.h
index 5771f4baa47..3f18aa18051 100644
--- a/include/asm-sh/machvec.h
+++ b/include/asm-sh/machvec.h
@@ -64,7 +64,7 @@ struct sh_machine_vector
void (*mv_heartbeat)(void);
- void *(*mv_consistent_alloc)(struct device *, size_t, dma_addr_t *, int);
+ void *(*mv_consistent_alloc)(struct device *, size_t, dma_addr_t *, gfp_t);
int (*mv_consistent_free)(struct device *, size_t, void *, dma_addr_t);
};
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index b8d26fe677f..cc9a2e86f5b 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -25,7 +25,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
return consistent_alloc(NULL, size, dma_handle);
}
diff --git a/include/asm-sparc/dma-mapping.h b/include/asm-sparc/dma-mapping.h
index 2dc5bb8effa..d7c3b0f0a90 100644
--- a/include/asm-sparc/dma-mapping.h
+++ b/include/asm-sparc/dma-mapping.h
@@ -8,7 +8,7 @@
#else
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
BUG();
return NULL;
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 1c5da41653a..c7d5804ba76 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -10,7 +10,7 @@
struct device;
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
BUG();
return NULL;
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index 13e6291f715..babd2989511 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -19,7 +19,7 @@ dma_set_mask(struct device *dev, u64 dma_mask)
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ gfp_t flag)
{
BUG();
return((void *) 0);
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index 2c192abe9ae..0229814af31 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -115,7 +115,7 @@ extern unsigned long uml_physmem;
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
-extern struct page *arch_validate(struct page *page, int mask, int order);
+extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
#define HAVE_ARCH_VALIDATE
extern void arch_free_page(struct page *page, int order);
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index e784fdc524f..54a380efed4 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -17,7 +17,7 @@ extern dma_addr_t bad_dma_address;
(swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- unsigned gfp);
+ gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index 5a82a6762c2..eeb3088a1c9 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -50,10 +50,10 @@ extern int iommu_setup(char *opt);
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions
*
- * On x86-64 it mostly equals, but we set it to zero to tell some subsystems
- * that an hard or soft IOMMU is available.
+ * On AMD64 this mostly holds, but we set it to zero to tell some subsystems
+ * that an IOMMU is available.
*/
-#define PCI_DMA_BUS_IS_PHYS 0
+#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0)
/*
* x86-64 always supports DAC, but sometimes it is useful to force
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 36293061f4e..7cbfd10ecc3 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -27,7 +27,7 @@ extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, int flags);
+ dma_addr_t *dma_handle, gfp_t flags);
extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index e86a206f120..c425f10d086 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -28,7 +28,7 @@ extern void consistent_sync(void*, size_t, int);
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 33276d1d05d..d2873b732bb 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -261,7 +261,7 @@ struct ata_taskfile {
((u64) (id)[(n) + 1] << 16) | \
((u64) (id)[(n) + 0]) )
-static inline int ata_id_current_chs_valid(u16 *id)
+static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
has not been issued to the device then the values of
@@ -273,7 +273,7 @@ static inline int ata_id_current_chs_valid(u16 *id)
id[56]; /* sectors in current translation */
}
-static inline int atapi_cdb_len(u16 *dev_id)
+static inline int atapi_cdb_len(const u16 *dev_id)
{
u16 tmp = dev_id[0] & 0x3;
switch (tmp) {
@@ -283,7 +283,7 @@ static inline int atapi_cdb_len(u16 *dev_id)
}
}
-static inline int is_atapi_taskfile(struct ata_taskfile *tf)
+static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
{
return (tf->protocol == ATA_PROT_ATAPI) ||
(tf->protocol == ATA_PROT_ATAPI_NODATA) ||
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b2a2509bd7e..da3c01955f3 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -260,11 +260,11 @@ extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
#ifdef CONFIG_AUDIT
/* These are defined in audit.c */
/* Public API */
-extern void audit_log(struct audit_context *ctx, int gfp_mask,
+extern void audit_log(struct audit_context *ctx, gfp_t gfp_mask,
int type, const char *fmt, ...)
__attribute__((format(printf,4,5)));
-extern struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask, int type);
+extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type);
extern void audit_log_format(struct audit_buffer *ab,
const char *fmt, ...)
__attribute__((format(printf,2,3)));
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 3344b4e8e43..685fd3720df 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -301,7 +301,7 @@ extern struct bio *bio_map_user_iov(struct request_queue *,
struct sg_iovec *, int, int);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
- unsigned int);
+ gfp_t);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index efdc9b5bc05..025a7f084db 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -96,8 +96,8 @@ struct io_context {
void put_io_context(struct io_context *ioc);
void exit_io_context(void);
-struct io_context *current_io_context(int gfp_flags);
-struct io_context *get_io_context(int gfp_flags);
+struct io_context *current_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
@@ -107,9 +107,9 @@ typedef void (rq_end_io_fn)(struct request *);
struct request_list {
int count[2];
int starved[2];
+ int elvpriv;
mempool_t *rq_pool;
wait_queue_head_t wait[2];
- wait_queue_head_t drain;
};
#define BLK_MAX_CDB 16
@@ -203,6 +203,7 @@ struct request {
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST, /* no low level driver retries */
+ __REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
__REQ_CMD, /* is a regular fs rw request */
@@ -210,6 +211,7 @@ enum rq_flag_bits {
__REQ_STARTED, /* drive already may have started this one */
__REQ_DONTPREP, /* don't call prep for this one */
__REQ_QUEUED, /* uses queueing */
+ __REQ_ELVPRIV, /* elevator private data attached */
/*
* for ATA/ATAPI devices
*/
@@ -235,6 +237,7 @@ enum rq_flag_bits {
#define REQ_RW (1 << __REQ_RW)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
+#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_CMD (1 << __REQ_CMD)
@@ -242,6 +245,7 @@ enum rq_flag_bits {
#define REQ_STARTED (1 << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED)
+#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
#define REQ_PC (1 << __REQ_PC)
#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC)
#define REQ_SENSE (1 << __REQ_SENSE)
@@ -333,6 +337,12 @@ struct request_queue
end_flush_fn *end_flush_fn;
/*
+ * Dispatch queue sorting
+ */
+ sector_t end_sector;
+ struct request *boundary_rq;
+
+ /*
* Auto-unplugging state
*/
struct timer_list unplug_timer;
@@ -354,7 +364,7 @@ struct request_queue
* queue needs bounce pages for pages above this limit
*/
unsigned long bounce_pfn;
- unsigned int bounce_gfp;
+ gfp_t bounce_gfp;
/*
* various queue flags, see QUEUE_* below
@@ -405,8 +415,6 @@ struct request_queue
unsigned int sg_reserved_size;
int node;
- struct list_head drain_list;
-
/*
* reserved for flush operations
*/
@@ -434,7 +442,7 @@ enum {
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
-#define QUEUE_FLAG_DRAIN 8 /* draining queue for sched switch */
+#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_FLUSH 9 /* doing barrier flush sequence */
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
@@ -454,6 +462,7 @@ enum {
#define blk_pm_request(rq) \
((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
+#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
#define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH)
#define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH)
@@ -550,7 +559,7 @@ extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void blk_end_sync_rq(struct request *rq);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
-extern struct request *blk_get_request(request_queue_t *, int, int);
+extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
@@ -565,7 +574,7 @@ extern void blk_run_queue(request_queue_t *);
extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
extern int blk_rq_unmap_user(struct bio *, unsigned int);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int);
+extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
struct request *, int);
@@ -611,12 +620,21 @@ extern void end_request(struct request *req, int uptodate);
static inline void blkdev_dequeue_request(struct request *req)
{
- BUG_ON(list_empty(&req->queuelist));
+ elv_dequeue_request(req->q, req);
+}
- list_del_init(&req->queuelist);
+/*
+ * This should be in elevator.h, but that requires pulling in rq and q
+ */
+static inline void elv_dispatch_add_tail(struct request_queue *q,
+ struct request *rq)
+{
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
- if (req->rl)
- elv_remove_request(req->q, req);
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = rq;
+ list_add_tail(&rq->queuelist, &q->queue_head);
}
/*
@@ -650,12 +668,10 @@ extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);
-extern void blk_wait_queue_drained(request_queue_t *, int);
-extern void blk_finish_queue_drain(request_queue_t *);
int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(int gfp_mask);
-request_queue_t *blk_alloc_queue_node(int,int);
+request_queue_t *blk_alloc_queue(gfp_t);
+request_queue_t *blk_alloc_queue_node(gfp_t, int);
#define blk_put_queue(q) blk_cleanup_queue((q))
/*
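
blk_get_request() and the other allocators in blkdev.h now take a gfp_t as well, so callers pass GFP_KERNEL or GFP_ATOMIC directly. A minimal caller sketch; q and the surrounding error handling are illustrative:

    static int demo_issue_rq(request_queue_t *q)
    {
            struct request *rq;

            /* GFP_KERNEL may sleep; use GFP_ATOMIC from atomic context */
            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (!rq)
                    return -ENOMEM;
            /* ... fill in and execute the request ... */
            blk_put_request(rq);
            return 0;
    }
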
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 6a1d154c082..88af42f5e04 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -188,7 +188,7 @@ extern int buffer_heads_over_limit;
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
-int try_to_release_page(struct page * page, int gfp_mask);
+int try_to_release_page(struct page * page, gfp_t gfp_mask);
int block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
diff --git a/include/linux/cyclomx.h b/include/linux/cyclomx.h
index 04fa7dff079..300d704bdb9 100644
--- a/include/linux/cyclomx.h
+++ b/include/linux/cyclomx.h
@@ -37,8 +37,6 @@
#include <linux/cycx_x25.h>
#endif
-#define is_digit(ch) (((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')?1:0)
-
/* Adapter Data Space.
* This structure is needed because we handle multiple cards, otherwise
* static data would do it.
diff --git a/include/linux/cycx_drv.h b/include/linux/cycx_drv.h
index 6621df86a74..12fe6b0bfcf 100644
--- a/include/linux/cycx_drv.h
+++ b/include/linux/cycx_drv.h
@@ -60,6 +60,5 @@ extern int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len);
extern int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len);
extern int cycx_exec(void __iomem *addr);
-extern void cycx_inten(struct cycx_hw *hw);
extern void cycx_intr(struct cycx_hw *hw);
#endif /* _CYCX_DRV_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ea6bbc2d740..a74c27e460b 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -8,18 +8,17 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struc
typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
-typedef struct request *(elevator_next_req_fn) (request_queue_t *);
+typedef int (elevator_dispatch_fn) (request_queue_t *, int);
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);
-typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
+typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, gfp_t);
typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
@@ -31,10 +30,9 @@ struct elevator_ops
elevator_merged_fn *elevator_merged_fn;
elevator_merge_req_fn *elevator_merge_req_fn;
- elevator_next_req_fn *elevator_next_req_fn;
+ elevator_dispatch_fn *elevator_dispatch_fn;
elevator_add_req_fn *elevator_add_req_fn;
- elevator_remove_req_fn *elevator_remove_req_fn;
- elevator_requeue_req_fn *elevator_requeue_req_fn;
+ elevator_activate_req_fn *elevator_activate_req_fn;
elevator_deactivate_req_fn *elevator_deactivate_req_fn;
elevator_queue_empty_fn *elevator_queue_empty_fn;
@@ -81,15 +79,15 @@ struct elevator_queue
/*
* block elevator interface
*/
+extern void elv_dispatch_sort(request_queue_t *, struct request *);
extern void elv_add_request(request_queue_t *, struct request *, int, int);
extern void __elv_add_request(request_queue_t *, struct request *, int, int);
extern int elv_merge(request_queue_t *, struct request **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
struct request *);
extern void elv_merged_request(request_queue_t *, struct request *);
-extern void elv_remove_request(request_queue_t *, struct request *);
+extern void elv_dequeue_request(request_queue_t *, struct request *);
extern void elv_requeue_request(request_queue_t *, struct request *);
-extern void elv_deactivate_request(request_queue_t *, struct request *);
extern int elv_queue_empty(request_queue_t *);
extern struct request *elv_next_request(struct request_queue *q);
extern struct request *elv_former_request(request_queue_t *, struct request *);
@@ -98,7 +96,7 @@ extern int elv_register_queue(request_queue_t *q);
extern void elv_unregister_queue(request_queue_t *q);
extern int elv_may_queue(request_queue_t *, int, struct bio *);
extern void elv_completed_request(request_queue_t *, struct request *);
-extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int);
+extern int elv_set_request(request_queue_t *, struct request *, struct bio *, gfp_t);
extern void elv_put_request(request_queue_t *, struct request *);
/*
@@ -142,4 +140,6 @@ enum {
ELV_MQUEUE_MUST,
};
+#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors)
+
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e0b77c5af9a..f83d997c558 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -320,7 +320,7 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long);
- int (*releasepage) (struct page *, int);
+ int (*releasepage) (struct page *, gfp_t);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
struct page* (*get_xip_page)(struct address_space *, sector_t,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 01796c41c95..142e1c1e068 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -119,7 +119,7 @@ struct gendisk {
int policy;
atomic_t sync_io; /* RAID */
- unsigned long stamp, stamp_idle;
+ unsigned long stamp;
int in_flight;
#ifdef CONFIG_SMP
struct disk_stats *dkstats;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3010e172394..c3779432a72 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -12,8 +12,8 @@ struct vm_area_struct;
* GFP bitmasks..
*/
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
-#define __GFP_DMA 0x01u
-#define __GFP_HIGHMEM 0x02u
+#define __GFP_DMA ((__force gfp_t)0x01u)
+#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
/*
* Action modifiers - doesn't change the zoning
@@ -26,24 +26,24 @@ struct vm_area_struct;
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*/
-#define __GFP_WAIT 0x10u /* Can wait and reschedule? */
-#define __GFP_HIGH 0x20u /* Should access emergency pools? */
-#define __GFP_IO 0x40u /* Can start physical IO? */
-#define __GFP_FS 0x80u /* Can call down to low-level FS? */
-#define __GFP_COLD 0x100u /* Cache-cold page required */
-#define __GFP_NOWARN 0x200u /* Suppress page allocation failure warning */
-#define __GFP_REPEAT 0x400u /* Retry the allocation. Might fail */
-#define __GFP_NOFAIL 0x800u /* Retry for ever. Cannot fail */
-#define __GFP_NORETRY 0x1000u /* Do not retry. Might fail */
-#define __GFP_NO_GROW 0x2000u /* Slab internal usage */
-#define __GFP_COMP 0x4000u /* Add compound page metadata */
-#define __GFP_ZERO 0x8000u /* Return zeroed page on success */
-#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
-#define __GFP_NORECLAIM 0x20000u /* No really zone reclaim during allocation */
-#define __GFP_HARDWALL 0x40000u /* Enforce hardwall cpuset memory allocs */
+#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */
+#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */
+#define __GFP_IO ((__force gfp_t)0x40u) /* Can start physical IO? */
+#define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */
+#define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */
+#define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
+#define __GFP_REPEAT ((__force gfp_t)0x400u) /* Retry the allocation. Might fail */
+#define __GFP_NOFAIL ((__force gfp_t)0x800u) /* Retry for ever. Cannot fail */
+#define __GFP_NORETRY ((__force gfp_t)0x1000u)/* Do not retry. Might fail */
+#define __GFP_NO_GROW ((__force gfp_t)0x2000u)/* Slab internal usage */
+#define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */
+#define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */
+#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
+#define __GFP_NORECLAIM ((__force gfp_t)0x20000u) /* No really zone reclaim during allocation */
+#define __GFP_HARDWALL ((__force gfp_t)0x40000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
-#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* if you forget to add the bitmask here kernel will crash, period */
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
@@ -64,6 +64,7 @@ struct vm_area_struct;
#define GFP_DMA __GFP_DMA
+#define gfp_zone(mask) ((__force int)((mask) & (__force gfp_t)GFP_ZONEMASK))
/*
* There is only one page-allocator function, and two main namespaces to
@@ -94,7 +95,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
return NULL;
return __alloc_pages(gfp_mask, order,
- NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
+ NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
}
#ifdef CONFIG_NUMA
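The new gfp_zone() helper above extracts the zone-modifier bits (the low two bits of the mask, per the comment at the top of this header) and uses them as an index into a node's zonelists, which is why alloc_pages_node() stops masking with GFP_ZONEMASK directly. A user-space sketch of that extraction; the constant names below are stand-ins for the real __GFP_* values:

#include <assert.h>

#define ZONE_MASK  0x03u   /* low two bits select the zone, as GFP_ZONEMASK does */
#define F_DMA      0x01u   /* stand-in for __GFP_DMA */
#define F_WAIT     0x10u   /* stand-in for an action modifier such as __GFP_WAIT */

#define flags_zone(mask) ((int)((mask) & ZONE_MASK))

int main(void)
{
    assert(flags_zone(F_DMA | F_WAIT) == 1);  /* action bits do not affect the index */
    assert(flags_zone(F_WAIT) == 0);          /* no zone modifier: default zonelist */
    return 0;
}
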
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 42cb7d70f9a..d664330d900 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -25,6 +25,8 @@ int is_hugepage_mem_enough(size_t);
unsigned long hugetlb_total_pages(void);
struct page *alloc_huge_page(void);
void free_huge_page(struct page *);
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access);
extern unsigned long max_huge_pages;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -99,6 +101,7 @@ static inline unsigned long hugetlb_total_pages(void)
do { } while (0)
#define alloc_huge_page() ({ NULL; })
#define free_huge_page(p) ({ (void)(p); BUG(); })
+#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
#ifndef HPAGE_MASK
#define HPAGE_MASK 0 /* Keep the compiler happy */
@@ -155,24 +158,11 @@ static inline void set_file_hugepages(struct file *file)
{
file->f_op = &hugetlbfs_file_operations;
}
-
-static inline int valid_hugetlb_file_off(struct vm_area_struct *vma,
- unsigned long address)
-{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- loff_t file_off = address - vma->vm_start;
-
- file_off += (vma->vm_pgoff << PAGE_SHIFT);
-
- return (file_off < inode->i_size);
-}
-
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file) 0
#define set_file_hugepages(file) BUG()
#define hugetlb_zero_setup(size) ERR_PTR(-ENOSYS)
-#define valid_hugetlb_file_off(vma, address) 0
#endif /* !CONFIG_HUGETLBFS */
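The hugetlb_fault() stub added for the non-hugetlb case, ({ BUG(); 0; }), relies on a GCC statement expression so the macro can run a statement and still yield a value at the call site. A stand-alone illustration of that idiom (a GNU C extension; the stub name below is made up):

#include <stdio.h>

/* A statement expression ({ stmts; expr; }) evaluates to its final expression. */
#define stub_op(x)  ({ printf("stub called with %d\n", (x)); 0; })

int main(void)
{
    int ret = stub_op(42);   /* runs the printf, then "returns" 0 */
    printf("ret = %d\n", ret);
    return 0;
}
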
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index bdc286ec947..b4af45aad25 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -492,7 +492,7 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
* Returns 0 on success or -ENOMEM on failure.
*/
static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
- size_t len, unsigned int gfp_mask)
+ size_t len, gfp_t gfp_mask)
{
struct pci_dev *pdev = to_pci_dev(dev);
int dma_64 = 0;
@@ -551,7 +551,7 @@ static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
* Returns the 0 on success or negative error code on failure.
*/
static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
- size_t len, unsigned int gfp_mask)
+ size_t len, gfp_t gfp_mask)
{
i2o_dma_free(dev, addr);
diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h
index 2ef0b21517f..1c7a0dd5536 100644
--- a/include/linux/ibmtr.h
+++ b/include/linux/ibmtr.h
@@ -7,8 +7,8 @@
/* ported to the Alpha architecture 02/20/96 (just used the HZ macro) */
#define TR_RETRY_INTERVAL (30*HZ) /* 500 on PC = 5 s */
-#define TR_RST_TIME (HZ/20) /* 5 on PC = 50 ms */
-#define TR_BUSY_INTERVAL (HZ/5) /* 5 on PC = 200 ms */
+#define TR_RST_TIME (msecs_to_jiffies(50)) /* 5 on PC = 50 ms */
+#define TR_BUSY_INTERVAL (msecs_to_jiffies(200)) /* 5 on PC = 200 ms */
#define TR_SPIN_INTERVAL (3*HZ) /* 3 seconds before init timeout */
#define TR_ISA 1
diff --git a/include/linux/idr.h b/include/linux/idr.h
index ca3b7e46257..7fb3ff9c7b0 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -71,8 +71,9 @@ struct idr {
*/
void *idr_find(struct idr *idp, int id);
-int idr_pre_get(struct idr *idp, unsigned gfp_mask);
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int idr_get_new(struct idr *idp, void *ptr, int *id);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void idr_remove(struct idr *idp, int id);
+void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 0856548a2a0..a8b1a207183 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -84,6 +84,7 @@
#define ARPHRD_IEEE802_TR 800 /* Magic type ident for TR */
#define ARPHRD_IEEE80211 801 /* IEEE 802.11 */
#define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */
+#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
#define ARPHRD_NONE 0xFFFE /* zero header length */
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index ff853b3173c..be197eb9007 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -69,7 +69,7 @@ extern int journal_enable_debug;
#define jbd_debug(f, a...) /**/
#endif
-extern void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry);
+extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
#define jbd_kmalloc(size, flags) \
__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
#define jbd_rep_kmalloc(size, flags) \
@@ -890,7 +890,7 @@ extern int journal_forget (handle_t *, struct buffer_head *);
extern void journal_sync_buffer (struct buffer_head *);
extern int journal_invalidatepage(journal_t *,
struct page *, unsigned long);
-extern int journal_try_to_free_buffers(journal_t *, struct page *, int);
+extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
extern int journal_stop(handle_t *);
extern int journal_flush (journal_t *);
extern void journal_lock_updates (journal_t *);
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 3b22304f12f..7f7403aa4a4 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -65,7 +65,7 @@ extern void kobject_unregister(struct kobject *);
extern struct kobject * kobject_get(struct kobject *);
extern void kobject_put(struct kobject *);
-extern char * kobject_get_path(struct kobject *, int);
+extern char * kobject_get_path(struct kobject *, gfp_t);
struct kobj_type {
void (*release)(struct kobject *);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e214f8c8f9..00a8a573885 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -91,7 +91,7 @@ enum {
ATA_SHT_EMULATED = 1,
ATA_SHT_CMD_PER_LUN = 1,
ATA_SHT_THIS_ID = -1,
- ATA_SHT_USE_CLUSTERING = 0,
+ ATA_SHT_USE_CLUSTERING = 1,
/* struct ata_device stuff */
ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
@@ -202,7 +202,7 @@ struct ata_ioports {
struct ata_probe_ent {
struct list_head node;
struct device *dev;
- struct ata_port_operations *port_ops;
+ const struct ata_port_operations *port_ops;
Scsi_Host_Template *sht;
struct ata_ioports port[ATA_MAX_PORTS];
unsigned int n_ports;
@@ -225,7 +225,7 @@ struct ata_host_set {
void __iomem *mmio_base;
unsigned int n_ports;
void *private_data;
- struct ata_port_operations *ops;
+ const struct ata_port_operations *ops;
struct ata_port * ports[0];
};
@@ -294,7 +294,7 @@ struct ata_device {
struct ata_port {
struct Scsi_Host *host; /* our co-allocated scsi host */
- struct ata_port_operations *ops;
+ const struct ata_port_operations *ops;
unsigned long flags; /* ATA_FLAG_xxx */
unsigned int id; /* unique id req'd by scsi midlyr */
unsigned int port_no; /* unique port #; from zero */
@@ -341,10 +341,10 @@ struct ata_port_operations {
void (*set_piomode) (struct ata_port *, struct ata_device *);
void (*set_dmamode) (struct ata_port *, struct ata_device *);
- void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
+ void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
- void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
+ void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
u8 (*check_status)(struct ata_port *ap);
u8 (*check_altstatus)(struct ata_port *ap);
u8 (*check_err)(struct ata_port *ap);
@@ -385,7 +385,7 @@ struct ata_port_info {
unsigned long pio_mask;
unsigned long mwdma_mask;
unsigned long udma_mask;
- struct ata_port_operations *port_ops;
+ const struct ata_port_operations *port_ops;
};
struct ata_timing {
@@ -413,7 +413,7 @@ extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_i
unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
#endif /* CONFIG_PCI */
-extern int ata_device_add(struct ata_probe_ent *ent);
+extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_host_set_remove(struct ata_host_set *host_set);
extern int ata_scsi_detect(Scsi_Host_Template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
@@ -426,16 +426,16 @@ extern int ata_ratelimit(void);
/*
* Default driver ops implementations
*/
-extern void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
-extern void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp);
-extern void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
+extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern u8 ata_chk_err(struct ata_port *ap);
-extern void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host_set *host_set);
@@ -446,8 +446,8 @@ extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
unsigned int buflen);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
-extern unsigned int ata_dev_classify(struct ata_taskfile *tf);
-extern void ata_dev_id_string(u16 *id, unsigned char *s,
+extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern void ata_dev_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_dev_config(struct ata_port *ap, unsigned int i);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
@@ -502,7 +502,7 @@ struct pci_bits {
extern void ata_pci_host_stop (struct ata_host_set *host_set);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
-extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits);
+extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
#endif /* CONFIG_PCI */
@@ -512,7 +512,7 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}
-static inline unsigned int ata_dev_present(struct ata_device *dev)
+static inline unsigned int ata_dev_present(const struct ata_device *dev)
{
return ((dev->class == ATA_DEV_ATA) ||
(dev->class == ATA_DEV_ATAPI));
@@ -711,7 +711,7 @@ static inline unsigned int sata_dev_present(struct ata_port *ap)
return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
}
-static inline int ata_try_flush_cache(struct ata_device *dev)
+static inline int ata_try_flush_cache(const struct ata_device *dev)
{
return ata_id_wcache_enabled(dev->id) ||
ata_id_has_flush(dev->id) ||
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 53fa5159544..40f63c9879d 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -52,7 +52,7 @@ struct loop_device {
unsigned lo_blocksize;
void *key_data;
- int old_gfp_mask;
+ gfp_t old_gfp_mask;
spinlock_t lo_lock;
struct bio *lo_bio;
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 9263d2db2d6..99e044b4efc 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -22,7 +22,7 @@ struct mb_cache_entry {
};
struct mb_cache_op {
- int (*free)(struct mb_cache_entry *, int);
+ int (*free)(struct mb_cache_entry *, gfp_t);
};
/* Functions on caches */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 9b8d0476988..68f5a0f392d 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -158,6 +158,7 @@ extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_check_gmii_support(struct mii_if_info *mii);
extern void mii_check_link (struct mii_if_info *mii);
extern unsigned int mii_check_media (struct mii_if_info *mii,
unsigned int ok_to_print,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 097b3a3c693..e1649578fb0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -747,7 +747,7 @@ extern unsigned long do_mremap(unsigned long addr,
* The callback will be passed nr_to_scan == 0 when the VM is querying the
* cache size, so a fastpath for that case is appropriate.
*/
-typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);
+typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
/*
* Add an aging callback. The int is the number of 'seeks' it takes
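The mm.h comment above states the shrinker contract: the VM passes nr_to_scan == 0 when it only wants to know the cache size, so that path should return at once. A user-space sketch of a callback honouring the contract; returning the remaining object count is my reading of the 2.6-era convention rather than something spelled out in this hunk:

#include <stdio.h>

typedef unsigned int gfp_like_t;   /* stand-in for gfp_t */
static int cache_objects = 128;

static int my_shrinker(int nr_to_scan, gfp_like_t gfp_mask)
{
    (void)gfp_mask;
    if (nr_to_scan == 0)             /* fast path: size query only */
        return cache_objects;
    if (nr_to_scan > cache_objects)
        nr_to_scan = cache_objects;
    cache_objects -= nr_to_scan;     /* pretend we reclaimed that many entries */
    return cache_objects;
}

int main(void)
{
    printf("size query: %d\n", my_shrinker(0, 0));
    printf("after scanning 32: %d\n", my_shrinker(32, 0));
    return 0;
}
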
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5ed471b58f4..7519eb4191e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -302,7 +302,7 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive,
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
- int alloc_type, int can_try_harder, int gfp_high);
+ int alloc_type, int can_try_harder, gfp_t gfp_high);
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 7db67b008ca..1c975d0d9e9 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -8,6 +8,7 @@ struct vfsmount;
struct open_intent {
int flags;
int create_mode;
+ struct file *file;
};
enum { MAX_NESTED_LINKS = 5 };
@@ -65,6 +66,13 @@ extern int FASTCALL(link_path_walk(const char *, struct nameidata *));
extern void path_release(struct nameidata *);
extern void path_release_on_umount(struct nameidata *);
+extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags);
+extern int path_lookup_open(const char *, unsigned lookup_flags, struct nameidata *, int open_flags);
+extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
+ int (*open)(struct inode *, struct file *));
+extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
+extern void release_open_intent(struct nameidata *);
+
extern struct dentry * lookup_one_len(const char *, struct dentry *, int);
extern struct dentry * lookup_hash(struct qstr *, struct dentry *);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 368e4c825ff..a9281b24c40 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -873,11 +873,9 @@ static inline void netif_rx_complete(struct net_device *dev)
static inline void netif_poll_disable(struct net_device *dev)
{
- while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state))
/* No hurry. */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
- }
+ schedule_timeout_interruptible(1);
}
static inline void netif_poll_enable(struct net_device *dev)
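netif_poll_disable() above loops until it can claim the __LINK_STATE_RX_SCHED bit, sleeping for a tick between attempts; the patch only folds the explicit task-state write plus schedule_timeout() into schedule_timeout_interruptible(). A user-space analogue of the claim-or-sleep loop with C11 atomics (the flag and the one-millisecond nap are illustrative):

#include <stdatomic.h>
#include <time.h>

static atomic_flag rx_sched = ATOMIC_FLAG_INIT;

static void poll_disable(void)
{
    /* No hurry: while the poller owns the bit, nap briefly and retry. */
    while (atomic_flag_test_and_set(&rx_sched)) {
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };  /* ~1 ms */
        nanosleep(&ts, NULL);
    }
}

static void poll_enable(void)
{
    atomic_flag_clear(&rx_sched);
}

int main(void)
{
    poll_disable();   /* bit was free, so this claims it immediately */
    poll_enable();
    return 0;
}
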
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 9a6047ff1b2..325fe7ae49b 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -41,6 +41,10 @@
#define NFS_MAX_FILE_IO_BUFFER_SIZE 32768
#define NFS_DEF_FILE_IO_BUFFER_SIZE 4096
+/* Default timeout values */
+#define NFS_MAX_UDP_TIMEOUT (60*HZ)
+#define NFS_MAX_TCP_TIMEOUT (600*HZ)
+
/*
* superblock magic number for NFS
*/
@@ -137,6 +141,7 @@ struct nfs_inode {
unsigned long attrtimeo_timestamp;
__u64 change_attr; /* v4 only */
+ unsigned long last_updated;
/* "Generation counter" for the attribute cache. This is
* bumped whenever we update the metadata on the
* server.
@@ -236,13 +241,17 @@ static inline int nfs_caches_unstable(struct inode *inode)
return atomic_read(&NFS_I(inode)->data_updates) != 0;
}
+static inline void nfs_mark_for_revalidate(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
+ spin_unlock(&inode->i_lock);
+}
+
static inline void NFS_CACHEINV(struct inode *inode)
{
- if (!nfs_caches_unstable(inode)) {
- spin_lock(&inode->i_lock);
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS;
- spin_unlock(&inode->i_lock);
- }
+ if (!nfs_caches_unstable(inode))
+ nfs_mark_for_revalidate(inode);
}
static inline int nfs_server_capable(struct inode *inode, int cap)
@@ -276,7 +285,7 @@ static inline long nfs_save_change_attribute(struct inode *inode)
static inline int nfs_verify_change_attribute(struct inode *inode, unsigned long chattr)
{
return !nfs_caches_unstable(inode)
- && chattr == NFS_I(inode)->cache_change_attribute;
+ && time_after_eq(chattr, NFS_I(inode)->cache_change_attribute);
}
/*
@@ -286,6 +295,7 @@ extern void nfs_zap_caches(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
struct nfs_fattr *);
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int nfs_permission(struct inode *, int, struct nameidata *);
extern int nfs_access_get_cached(struct inode *, struct rpc_cred *, struct nfs_access_entry *);
@@ -312,6 +322,12 @@ extern void nfs_file_clear_open_context(struct file *filp);
/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
extern u32 root_nfs_parse_addr(char *name); /*__init*/
+static inline void nfs_fattr_init(struct nfs_fattr *fattr)
+{
+ fattr->valid = 0;
+ fattr->time_start = jiffies;
+}
+
/*
* linux/fs/nfs/file.c
*/
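nfs_verify_change_attribute() above moves from strict equality to time_after_eq(), which implies cache_change_attribute is now a jiffies-style timestamp compared with wraparound-safe arithmetic: subtract in unsigned arithmetic and test the sign of the signed result. A stand-alone illustration; time_after_eq is restated locally in the form used by jiffies.h:

#include <assert.h>

#define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

int main(void)
{
    unsigned long before_wrap = (unsigned long)-10;  /* counter just before wrapping */
    unsigned long after_wrap  = 5;                   /* counter shortly after wrapping */

    assert(time_after_eq(after_wrap, before_wrap));  /* ordering survives the wrap */
    assert(!time_after_eq(before_wrap, after_wrap));
    return 0;
}
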
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index a2bf6914ff1..40718669b9c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -41,7 +41,7 @@ struct nfs_fattr {
__u32 bitmap[2]; /* NFSv4 returned attribute bitmap */
__u64 change_attr; /* NFSv4 change attribute */
__u64 pre_change_attr;/* pre-op NFSv4 change attribute */
- unsigned long timestamp;
+ unsigned long time_start;
};
#define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */
@@ -96,12 +96,13 @@ struct nfs4_change_info {
u64 after;
};
+struct nfs_seqid;
/*
* Arguments to the open call.
*/
struct nfs_openargs {
const struct nfs_fh * fh;
- __u32 seqid;
+ struct nfs_seqid * seqid;
int open_flags;
__u64 clientid;
__u32 id;
@@ -123,6 +124,7 @@ struct nfs_openres {
struct nfs4_change_info cinfo;
__u32 rflags;
struct nfs_fattr * f_attr;
+ struct nfs_fattr * dir_attr;
const struct nfs_server *server;
int delegation_type;
nfs4_stateid delegation;
@@ -136,7 +138,7 @@ struct nfs_openres {
struct nfs_open_confirmargs {
const struct nfs_fh * fh;
nfs4_stateid stateid;
- __u32 seqid;
+ struct nfs_seqid * seqid;
};
struct nfs_open_confirmres {
@@ -148,13 +150,16 @@ struct nfs_open_confirmres {
*/
struct nfs_closeargs {
struct nfs_fh * fh;
- nfs4_stateid stateid;
- __u32 seqid;
+ nfs4_stateid * stateid;
+ struct nfs_seqid * seqid;
int open_flags;
+ const u32 * bitmask;
};
struct nfs_closeres {
nfs4_stateid stateid;
+ struct nfs_fattr * fattr;
+ const struct nfs_server *server;
};
/*
* * Arguments to the lock,lockt, and locku call.
@@ -164,30 +169,19 @@ struct nfs_lowner {
u32 id;
};
-struct nfs_open_to_lock {
- __u32 open_seqid;
- nfs4_stateid open_stateid;
- __u32 lock_seqid;
- struct nfs_lowner lock_owner;
-};
-
-struct nfs_exist_lock {
- nfs4_stateid stateid;
- __u32 seqid;
-};
-
struct nfs_lock_opargs {
+ struct nfs_seqid * lock_seqid;
+ nfs4_stateid * lock_stateid;
+ struct nfs_seqid * open_seqid;
+ nfs4_stateid * open_stateid;
+ struct nfs_lowner lock_owner;
__u32 reclaim;
__u32 new_lock_owner;
- union {
- struct nfs_open_to_lock *open_lock;
- struct nfs_exist_lock *exist_lock;
- } u;
};
struct nfs_locku_opargs {
- __u32 seqid;
- nfs4_stateid stateid;
+ struct nfs_seqid * seqid;
+ nfs4_stateid * stateid;
};
struct nfs_lockargs {
@@ -262,6 +256,7 @@ struct nfs_writeargs {
enum nfs3_stable_how stable;
unsigned int pgbase;
struct page ** pages;
+ const u32 * bitmask;
};
struct nfs_writeverf {
@@ -273,6 +268,7 @@ struct nfs_writeres {
struct nfs_fattr * fattr;
struct nfs_writeverf * verf;
__u32 count;
+ const struct nfs_server *server;
};
/*
@@ -550,6 +546,7 @@ struct nfs4_create_res {
struct nfs_fh * fh;
struct nfs_fattr * fattr;
struct nfs4_change_info dir_cinfo;
+ struct nfs_fattr * dir_fattr;
};
struct nfs4_fsinfo_arg {
@@ -571,8 +568,17 @@ struct nfs4_link_arg {
const struct nfs_fh * fh;
const struct nfs_fh * dir_fh;
const struct qstr * name;
+ const u32 * bitmask;
+};
+
+struct nfs4_link_res {
+ const struct nfs_server * server;
+ struct nfs_fattr * fattr;
+ struct nfs4_change_info cinfo;
+ struct nfs_fattr * dir_attr;
};
+
struct nfs4_lookup_arg {
const struct nfs_fh * dir_fh;
const struct qstr * name;
@@ -619,6 +625,13 @@ struct nfs4_readlink {
struct nfs4_remove_arg {
const struct nfs_fh * fh;
const struct qstr * name;
+ const u32 * bitmask;
+};
+
+struct nfs4_remove_res {
+ const struct nfs_server * server;
+ struct nfs4_change_info cinfo;
+ struct nfs_fattr * dir_attr;
};
struct nfs4_rename_arg {
@@ -626,11 +639,15 @@ struct nfs4_rename_arg {
const struct nfs_fh * new_dir;
const struct qstr * old_name;
const struct qstr * new_name;
+ const u32 * bitmask;
};
struct nfs4_rename_res {
+ const struct nfs_server * server;
struct nfs4_change_info old_cinfo;
+ struct nfs_fattr * old_fattr;
struct nfs4_change_info new_cinfo;
+ struct nfs_fattr * new_fattr;
};
struct nfs4_setclientid {
@@ -722,7 +739,7 @@ struct nfs_rpc_ops {
int (*write) (struct nfs_write_data *);
int (*commit) (struct nfs_write_data *);
int (*create) (struct inode *, struct dentry *,
- struct iattr *, int);
+ struct iattr *, int, struct nameidata *);
int (*remove) (struct inode *, struct qstr *);
int (*unlink_setup) (struct rpc_message *,
struct dentry *, struct qstr *);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index acbf31c154f..ba6c310a055 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -21,16 +21,17 @@
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
- return mapping->flags & __GFP_BITS_MASK;
+ return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
/*
* This is non-atomic. Only to be used before the mapping is activated.
* Probably needs a barrier...
*/
-static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
+static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
- m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
+ m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
+ (__force unsigned long)mask;
}
/*
@@ -69,7 +70,7 @@ extern struct page * find_lock_page(struct address_space *mapping,
extern struct page * find_trylock_page(struct address_space *mapping,
unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
- unsigned long index, unsigned int gfp_mask);
+ unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
@@ -92,9 +93,9 @@ extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
int add_to_page_cache(struct page *page, struct address_space *mapping,
- unsigned long index, int gfp_mask);
+ unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- unsigned long index, int gfp_mask);
+ unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
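The pagemap.h hunk shows where the __force casts come from: the allocation mask is stored in the low __GFP_BITS_MASK bits of mapping->flags, a plain unsigned long, so every conversion between the annotated gfp_t and the flags word has to be spelled out for sparse. A user-space sketch of the pack/unpack pattern itself, assuming the 20-bit mask declared in gfp.h above (names are stand-ins):

#include <assert.h>

typedef unsigned int flags_mask_t;          /* stand-in for gfp_t */
#define BITS_MASK 0xFFFFFu                  /* low 20 bits hold the allocation mask */

static unsigned long set_mask(unsigned long flags, flags_mask_t mask)
{
    return (flags & ~(unsigned long)BITS_MASK) | (unsigned long)mask;
}

static flags_mask_t get_mask(unsigned long flags)
{
    return (flags_mask_t)(flags & BITS_MASK);
}

int main(void)
{
    unsigned long flags = 0x300000UL;       /* pretend the high bits carry other state */

    flags = set_mask(flags, 0x10u | 0x40u); /* e.g. "can wait" | "can start IO" */
    assert(get_mask(flags) == 0x50u);
    assert((flags & ~(unsigned long)BITS_MASK) == 0x300000UL);  /* high bits kept */
    return 0;
}
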
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f74ed946247..71834f05504 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -723,6 +723,7 @@
#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282
#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290
#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301
+#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a
#define PCI_DEVICE_ID_HP_CISS 0x3210
#define PCI_DEVICE_ID_HP_CISSA 0x3220
#define PCI_DEVICE_ID_HP_CISSB 0x3222
@@ -2696,6 +2697,7 @@
#define PCI_SUBVENDOR_ID_EXSYS 0xd84d
#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014
+#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055
#define PCI_VENDOR_ID_TIGERJET 0xe159
#define PCI_DEVICE_ID_TIGERJET_300 0x0001
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 045d4761feb..9f0f9281f42 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -24,7 +24,7 @@
struct radix_tree_root {
unsigned int height;
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
struct radix_tree_node *rnode;
};
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index af00b10294c..001ab82df05 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1972,7 +1972,7 @@ extern struct address_space_operations reiserfs_address_space_operations;
/* fix_nodes.c */
#ifdef CONFIG_REISERFS_CHECK
-void *reiserfs_kmalloc(size_t size, int flags, struct super_block *s);
+void *reiserfs_kmalloc(size_t size, gfp_t flags, struct super_block *s);
void reiserfs_kfree(const void *vp, size_t size, struct super_block *s);
#else
static inline void *reiserfs_kmalloc(size_t size, int flags,
diff --git a/include/linux/sdladrv.h b/include/linux/sdladrv.h
index 78f634007fc..c85e103d5e7 100644
--- a/include/linux/sdladrv.h
+++ b/include/linux/sdladrv.h
@@ -52,12 +52,8 @@ typedef struct sdlahw
extern int sdla_setup (sdlahw_t* hw, void* sfm, unsigned len);
extern int sdla_down (sdlahw_t* hw);
-extern int sdla_inten (sdlahw_t* hw);
-extern int sdla_intde (sdlahw_t* hw);
-extern int sdla_intack (sdlahw_t* hw);
extern void S514_intack (sdlahw_t* hw, u32 int_status);
extern void read_S514_int_stat (sdlahw_t* hw, u32* int_status);
-extern int sdla_intr (sdlahw_t* hw);
extern int sdla_mapmem (sdlahw_t* hw, unsigned long addr);
extern int sdla_peek (sdlahw_t* hw, unsigned long addr, void* buf,
unsigned len);
diff --git a/include/linux/security.h b/include/linux/security.h
index 627382e7405..dac956ed98f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1210,7 +1210,7 @@ struct security_operations {
int (*socket_shutdown) (struct socket * sock, int how);
int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb);
int (*socket_getpeersec) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
- int (*sk_alloc_security) (struct sock *sk, int family, int priority);
+ int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
void (*sk_free_security) (struct sock *sk);
#endif /* CONFIG_SECURITY_NETWORK */
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 8f5d9e7f873..b756935da9c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -171,7 +171,6 @@ enum {
* struct sk_buff - socket buffer
* @next: Next buffer in list
* @prev: Previous buffer in list
- * @list: List we are on
* @sk: Socket we are owned by
* @tstamp: Time we arrived
* @dev: Device we arrived on/are leaving by
@@ -190,6 +189,7 @@ enum {
* @cloned: Head may be cloned (check refcnt to be sure)
* @nohdr: Payload reference only, must not modify header
* @pkt_type: Packet class
+ * @fclone: skbuff clone status
* @ip_summed: Driver fed us an IP checksum
* @priority: Packet queueing priority
* @users: User count - see {datagram,tcp}.c
@@ -202,6 +202,7 @@ enum {
* @destructor: Destruct function
* @nfmark: Can be used for communication between hooks
* @nfct: Associated connection, if any
+ * @ipvs_property: skbuff is owned by ipvs
* @nfctinfo: Relationship of this skb to the connection
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @tc_index: Traffic control index
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5fc04a16ecb..09b9aa60063 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -121,7 +121,7 @@ extern unsigned int ksize(const void *);
extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
extern void *kmalloc_node(size_t size, gfp_t flags, int node);
#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
{
return kmem_cache_alloc(cachep, flags);
}
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 04ebc24db34..b68c11a2d6d 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -66,7 +66,12 @@ struct rpc_cred_cache {
struct rpc_auth {
unsigned int au_cslack; /* call cred size estimate */
- unsigned int au_rslack; /* reply verf size guess */
+ /* guess at number of u32's auth adds before
+ * reply data; normally the verifier size: */
+ unsigned int au_rslack;
+ /* for gss, used to calculate au_rslack: */
+ unsigned int au_verfsize;
+
unsigned int au_flags; /* various flags */
struct rpc_authops * au_ops; /* operations */
rpc_authflavor_t au_flavor; /* pseudoflavor (note may
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index eadb31e3c19..1a42d902bc1 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -32,6 +32,7 @@
#define RPCDBG_AUTH 0x0010
#define RPCDBG_PMAP 0x0020
#define RPCDBG_SCHED 0x0040
+#define RPCDBG_TRANS 0x0080
#define RPCDBG_SVCSOCK 0x0100
#define RPCDBG_SVCDSP 0x0200
#define RPCDBG_MISC 0x0400
@@ -94,6 +95,8 @@ enum {
CTL_NLMDEBUG,
CTL_SLOTTABLE_UDP,
CTL_SLOTTABLE_TCP,
+ CTL_MIN_RESVPORT,
+ CTL_MAX_RESVPORT,
};
#endif /* _LINUX_SUNRPC_DEBUG_H_ */
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 689262f6305..9b8bcf125c1 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -40,14 +40,21 @@ int gss_import_sec_context(
struct gss_ctx **ctx_id);
u32 gss_get_mic(
struct gss_ctx *ctx_id,
- u32 qop,
struct xdr_buf *message,
struct xdr_netobj *mic_token);
u32 gss_verify_mic(
struct gss_ctx *ctx_id,
struct xdr_buf *message,
- struct xdr_netobj *mic_token,
- u32 *qstate);
+ struct xdr_netobj *mic_token);
+u32 gss_wrap(
+ struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *outbuf,
+ struct page **inpages);
+u32 gss_unwrap(
+ struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *inbuf);
u32 gss_delete_sec_context(
struct gss_ctx **ctx_id);
@@ -56,7 +63,6 @@ char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
struct pf_desc {
u32 pseudoflavor;
- u32 qop;
u32 service;
char *name;
char *auth_domain_name;
@@ -85,14 +91,21 @@ struct gss_api_ops {
struct gss_ctx *ctx_id);
u32 (*gss_get_mic)(
struct gss_ctx *ctx_id,
- u32 qop,
struct xdr_buf *message,
struct xdr_netobj *mic_token);
u32 (*gss_verify_mic)(
struct gss_ctx *ctx_id,
struct xdr_buf *message,
- struct xdr_netobj *mic_token,
- u32 *qstate);
+ struct xdr_netobj *mic_token);
+ u32 (*gss_wrap)(
+ struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *outbuf,
+ struct page **inpages);
+ u32 (*gss_unwrap)(
+ struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *buf);
void (*gss_delete_sec_context)(
void *internal_ctx_id);
};
diff --git a/include/linux/sunrpc/gss_err.h b/include/linux/sunrpc/gss_err.h
index 92608a2e574..a6807867bd2 100644
--- a/include/linux/sunrpc/gss_err.h
+++ b/include/linux/sunrpc/gss_err.h
@@ -66,16 +66,6 @@ typedef unsigned int OM_uint32;
/*
- * Define the default Quality of Protection for per-message services. Note
- * that an implementation that offers multiple levels of QOP may either reserve
- * a value (for example zero, as assumed here) to mean "default protection", or
- * alternatively may simply equate GSS_C_QOP_DEFAULT to a specific explicit
- * QOP value. However a value of 0 should always be interpreted by a GSSAPI
- * implementation as a request for the default protection level.
- */
-#define GSS_C_QOP_DEFAULT 0
-
-/*
* Expiration time of 2^32-1 seconds means infinite lifetime for a
* credential or security context
*/
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index ffe31d2eb9e..2c3601d3104 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -116,18 +116,22 @@ enum seal_alg {
s32
make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
- struct xdr_netobj *cksum);
+ int body_offset, struct xdr_netobj *cksum);
+
+u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
+ struct xdr_netobj *);
+
+u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
+ struct xdr_netobj *);
u32
-krb5_make_token(struct krb5_ctx *context_handle, int qop_req,
- struct xdr_buf *input_message_buffer,
- struct xdr_netobj *output_message_buffer, int toktype);
+gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
+ struct xdr_buf *outbuf, struct page **pages);
u32
-krb5_read_token(struct krb5_ctx *context_handle,
- struct xdr_netobj *input_token_buffer,
- struct xdr_buf *message_buffer,
- int *qop_state, int toktype);
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+ struct xdr_buf *buf);
+
u32
krb5_encrypt(struct crypto_tfm * key,
@@ -137,6 +141,13 @@ u32
krb5_decrypt(struct crypto_tfm * key,
void *iv, void *in, void *out, int length);
+int
+gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset,
+ struct page **pages);
+
+int
+gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset);
+
s32
krb5_make_seq_num(struct crypto_tfm * key,
int direction,
diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h
index b5c9968c3c1..0beb2cf00a8 100644
--- a/include/linux/sunrpc/gss_spkm3.h
+++ b/include/linux/sunrpc/gss_spkm3.h
@@ -41,9 +41,9 @@ struct spkm3_ctx {
#define SPKM_WRAP_TOK 5
#define SPKM_DEL_TOK 6
-u32 spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, struct xdr_buf * text, struct xdr_netobj * token, int toktype);
+u32 spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_netobj * token, int toktype);
-u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int *qop_state, int toktype);
+u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype);
#define CKSUMTYPE_RSA_MD5 0x0007
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 15f11533238..f43f237360a 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -76,5 +76,30 @@ enum rpc_auth_stat {
#define RPC_MAXNETNAMELEN 256
+/*
+ * From RFC 1831:
+ *
+ * "A record is composed of one or more record fragments. A record
+ * fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of
+ * fragment data. The bytes encode an unsigned binary number; as with
+ * XDR integers, the byte order is from highest to lowest. The number
+ * encodes two values -- a boolean which indicates whether the fragment
+ * is the last fragment of the record (bit value 1 implies the fragment
+ * is the last fragment) and a 31-bit unsigned binary value which is the
+ * length in bytes of the fragment's data. The boolean value is the
+ * highest-order bit of the header; the length is the 31 low-order bits.
+ * (Note that this record specification is NOT in XDR standard form!)"
+ *
+ * The Linux RPC client always sends its requests in a single record
+ * fragment, limiting the maximum payload size for stream transports to
+ * 2GB.
+ */
+
+typedef u32 rpc_fraghdr;
+
+#define RPC_LAST_STREAM_FRAGMENT (1U << 31)
+#define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT)
+#define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1)
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_MSGPROT_H_ */
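The record-marking comment added above describes a compact wire format: a four-byte header whose top bit marks the last fragment of a record and whose remaining 31 bits give the fragment length. A stand-alone encoder for that header, reusing the constants defined above; on the wire the header is big-endian, and this sketch stays in host order rather than adding the htonl() step:

#include <assert.h>
#include <stdint.h>

#define RPC_LAST_STREAM_FRAGMENT  (1U << 31)
#define RPC_FRAGMENT_SIZE_MASK    (~RPC_LAST_STREAM_FRAGMENT)

static uint32_t encode_fraghdr(uint32_t len, int last)
{
    return (last ? RPC_LAST_STREAM_FRAGMENT : 0) | (len & RPC_FRAGMENT_SIZE_MASK);
}

int main(void)
{
    uint32_t hdr = encode_fraghdr(1432, 1);

    assert(hdr & RPC_LAST_STREAM_FRAGMENT);         /* boolean: last fragment */
    assert((hdr & RPC_FRAGMENT_SIZE_MASK) == 1432); /* 31-bit fragment length */
    return 0;
}
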
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 23448d0fb5b..5da968729cf 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -161,14 +161,10 @@ typedef struct {
typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);
+extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
skb_reader_t *, skb_read_actor_t);
-struct socket;
-struct sockaddr;
-extern int xdr_sendpages(struct socket *, struct sockaddr *, int,
- struct xdr_buf *, unsigned int, int);
-
extern int xdr_encode_word(struct xdr_buf *, int, u32);
extern int xdr_decode_word(struct xdr_buf *, int, u32 *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index e618c164981..3b8b6e823c7 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -1,5 +1,5 @@
/*
- * linux/include/linux/sunrpc/clnt_xprt.h
+ * linux/include/linux/sunrpc/xprt.h
*
* Declarations for the RPC transport interface.
*
@@ -15,20 +15,6 @@
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
-/*
- * The transport code maintains an estimate on the maximum number of out-
- * standing RPC requests, using a smoothed version of the congestion
- * avoidance implemented in 44BSD. This is basically the Van Jacobson
- * congestion algorithm: If a retransmit occurs, the congestion window is
- * halved; otherwise, it is incremented by 1/cwnd when
- *
- * - a reply is received and
- * - a full number of requests are outstanding and
- * - the congestion window hasn't been updated recently.
- *
- * Upper procedures may check whether a request would block waiting for
- * a free RPC slot by using the RPC_CONGESTED() macro.
- */
extern unsigned int xprt_udp_slot_table_entries;
extern unsigned int xprt_tcp_slot_table_entries;
@@ -36,34 +22,23 @@ extern unsigned int xprt_tcp_slot_table_entries;
#define RPC_DEF_SLOT_TABLE (16U)
#define RPC_MAX_SLOT_TABLE (128U)
-#define RPC_CWNDSHIFT (8U)
-#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT)
-#define RPC_INITCWND RPC_CWNDSCALE
-#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
-#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-
-/* Default timeout values */
-#define RPC_MAX_UDP_TIMEOUT (60*HZ)
-#define RPC_MAX_TCP_TIMEOUT (600*HZ)
-
/*
- * Wait duration for an RPC TCP connection to be established. Solaris
- * NFS over TCP uses 60 seconds, for example, which is in line with how
- * long a server takes to reboot.
+ * RPC call and reply header size as number of 32bit words (verifier
+ * size computed separately)
*/
-#define RPC_CONNECT_TIMEOUT (60*HZ)
+#define RPC_CALLHDRSIZE 6
+#define RPC_REPHDRSIZE 4
/*
- * Delay an arbitrary number of seconds before attempting to reconnect
- * after an error.
+ * Parameters for choosing a free port
*/
-#define RPC_REESTABLISH_TIMEOUT (15*HZ)
+extern unsigned int xprt_min_resvport;
+extern unsigned int xprt_max_resvport;
-/* RPC call and reply header size as number of 32bit words (verifier
- * size computed separately)
- */
-#define RPC_CALLHDRSIZE 6
-#define RPC_REPHDRSIZE 4
+#define RPC_MIN_RESVPORT (1U)
+#define RPC_MAX_RESVPORT (65535U)
+#define RPC_DEF_MIN_RESVPORT (650U)
+#define RPC_DEF_MAX_RESVPORT (1023U)
/*
* This describes a timeout strategy
@@ -76,6 +51,9 @@ struct rpc_timeout {
unsigned char to_exponential;
};
+struct rpc_task;
+struct rpc_xprt;
+
/*
* This describes a complete RPC request
*/
@@ -95,7 +73,10 @@ struct rpc_rqst {
int rq_cong; /* has incremented xprt->cong */
int rq_received; /* receive completed */
u32 rq_seqno; /* gss seq no. used on req. */
-
+ int rq_enc_pages_num;
+ struct page **rq_enc_pages; /* scratch pages for use by
+ gss privacy code */
+ void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
struct list_head rq_list;
struct xdr_buf rq_private_buf; /* The receive buffer
@@ -121,12 +102,21 @@ struct rpc_rqst {
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
-#define XPRT_LAST_FRAG (1 << 0)
-#define XPRT_COPY_RECM (1 << 1)
-#define XPRT_COPY_XID (1 << 2)
-#define XPRT_COPY_DATA (1 << 3)
+struct rpc_xprt_ops {
+ void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
+ int (*reserve_xprt)(struct rpc_task *task);
+ void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*connect)(struct rpc_task *task);
+ int (*send_request)(struct rpc_task *task);
+ void (*set_retrans_timeout)(struct rpc_task *task);
+ void (*timer)(struct rpc_task *task);
+ void (*release_request)(struct rpc_task *task);
+ void (*close)(struct rpc_xprt *xprt);
+ void (*destroy)(struct rpc_xprt *xprt);
+};
struct rpc_xprt {
+ struct rpc_xprt_ops * ops; /* transport methods */
struct socket * sock; /* BSD socket layer */
struct sock * inet; /* INET layer */
@@ -137,11 +127,13 @@ struct rpc_xprt {
unsigned long cong; /* current congestion */
unsigned long cwnd; /* congestion window */
- unsigned int rcvsize, /* socket receive buffer size */
- sndsize; /* socket send buffer size */
+ size_t rcvsize, /* transport rcv buffer size */
+ sndsize; /* transport send buffer size */
size_t max_payload; /* largest RPC payload size,
in bytes */
+ unsigned int tsh_size; /* size of transport specific
+ header */
struct rpc_wait_queue sending; /* requests waiting to send */
struct rpc_wait_queue resend; /* requests waiting to resend */
@@ -150,11 +142,9 @@ struct rpc_xprt {
struct list_head free; /* free slots */
struct rpc_rqst * slot; /* slot table storage */
unsigned int max_reqs; /* total slots */
- unsigned long sockstate; /* Socket state */
+ unsigned long state; /* transport state */
unsigned char shutdown : 1, /* being shut down */
- nocong : 1, /* no congestion control */
- resvport : 1, /* use a reserved port */
- stream : 1; /* TCP */
+ resvport : 1; /* use a reserved port */
/*
* XID
@@ -171,22 +161,27 @@ struct rpc_xprt {
unsigned long tcp_copied, /* copied to request */
tcp_flags;
/*
- * Connection of sockets
+ * Connection of transports
*/
- struct work_struct sock_connect;
+ unsigned long connect_timeout,
+ bind_timeout,
+ reestablish_timeout;
+ struct work_struct connect_worker;
unsigned short port;
+
/*
- * Disconnection of idle sockets
+ * Disconnection of idle transports
*/
struct work_struct task_cleanup;
struct timer_list timer;
- unsigned long last_used;
+ unsigned long last_used,
+ idle_timeout;
/*
* Send stuff
*/
- spinlock_t sock_lock; /* lock socket info */
- spinlock_t xprt_lock; /* lock xprt info */
+ spinlock_t transport_lock; /* lock transport info */
+ spinlock_t reserve_lock; /* lock slot table */
struct rpc_task * snd_task; /* Task blocked in send */
struct list_head recv;
@@ -195,37 +190,111 @@ struct rpc_xprt {
void (*old_data_ready)(struct sock *, int);
void (*old_state_change)(struct sock *);
void (*old_write_space)(struct sock *);
-
- wait_queue_head_t cong_wait;
};
+#define XPRT_LAST_FRAG (1 << 0)
+#define XPRT_COPY_RECM (1 << 1)
+#define XPRT_COPY_XID (1 << 2)
+#define XPRT_COPY_DATA (1 << 3)
+
#ifdef __KERNEL__
-struct rpc_xprt * xprt_create_proto(int proto, struct sockaddr_in *addr,
- struct rpc_timeout *toparms);
-int xprt_destroy(struct rpc_xprt *);
-void xprt_set_timeout(struct rpc_timeout *, unsigned int,
- unsigned long);
+/*
+ * Transport operations used by ULPs
+ */
+struct rpc_xprt * xprt_create_proto(int proto, struct sockaddr_in *addr, struct rpc_timeout *to);
+void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr);
-void xprt_reserve(struct rpc_task *);
-int xprt_prepare_transmit(struct rpc_task *);
-void xprt_transmit(struct rpc_task *);
-void xprt_receive(struct rpc_task *);
+/*
+ * Generic internal transport functions
+ */
+void xprt_connect(struct rpc_task *task);
+void xprt_reserve(struct rpc_task *task);
+int xprt_reserve_xprt(struct rpc_task *task);
+int xprt_reserve_xprt_cong(struct rpc_task *task);
+int xprt_prepare_transmit(struct rpc_task *task);
+void xprt_transmit(struct rpc_task *task);
+void xprt_abort_transmit(struct rpc_task *task);
int xprt_adjust_timeout(struct rpc_rqst *req);
-void xprt_release(struct rpc_task *);
-void xprt_connect(struct rpc_task *);
-void xprt_sock_setbufsize(struct rpc_xprt *);
-
-#define XPRT_LOCKED 0
-#define XPRT_CONNECT 1
-#define XPRT_CONNECTING 2
-
-#define xprt_connected(xp) (test_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_set_connected(xp) (set_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_test_and_set_connected(xp) (test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_test_and_clear_connected(xp) \
- (test_and_clear_bit(XPRT_CONNECT, &(xp)->sockstate))
-#define xprt_clear_connected(xp) (clear_bit(XPRT_CONNECT, &(xp)->sockstate))
+void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_release(struct rpc_task *task);
+int xprt_destroy(struct rpc_xprt *xprt);
+
+static inline u32 *xprt_skip_transport_header(struct rpc_xprt *xprt, u32 *p)
+{
+ return p + xprt->tsh_size;
+}
+
+/*
+ * Transport switch helper functions
+ */
+void xprt_set_retrans_timeout_def(struct rpc_task *task);
+void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
+void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
+void xprt_wait_for_buffer_space(struct rpc_task *task);
+void xprt_write_space(struct rpc_xprt *xprt);
+void xprt_update_rtt(struct rpc_task *task);
+void xprt_adjust_cwnd(struct rpc_task *task, int result);
+struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid);
+void xprt_complete_rqst(struct rpc_task *task, int copied);
+void xprt_release_rqst_cong(struct rpc_task *task);
+void xprt_disconnect(struct rpc_xprt *xprt);
+
+/*
+ * Socket transport setup operations
+ */
+int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to);
+int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to);
+
+/*
+ * Reserved bit positions in xprt->state
+ */
+#define XPRT_LOCKED (0)
+#define XPRT_CONNECTED (1)
+#define XPRT_CONNECTING (2)
+
+static inline void xprt_set_connected(struct rpc_xprt *xprt)
+{
+ set_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline void xprt_clear_connected(struct rpc_xprt *xprt)
+{
+ clear_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_connected(struct rpc_xprt *xprt)
+{
+ return test_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
+{
+ return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
+{
+ return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(XPRT_CONNECTING, &xprt->state);
+ smp_mb__after_clear_bit();
+}
+
+static inline int xprt_connecting(struct rpc_xprt *xprt)
+{
+ return test_bit(XPRT_CONNECTING, &xprt->state);
+}
+
+static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
+{
+ return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
+}
#endif /* __KERNEL__*/
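The bit helpers at the end of this header replace the old sockstate macros, and xprt_test_and_set_connecting()/xprt_clear_connecting() read like a guard that lets a single task drive a connect attempt, with the memory barriers around clear_bit() providing the release. A user-space analogue of that guard with C11 atomics; the usage pattern is inferred from the helper names, not shown in this diff:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag connecting = ATOMIC_FLAG_INIT;

static void try_connect(int caller)
{
    if (atomic_flag_test_and_set(&connecting)) {   /* someone else got here first */
        printf("caller %d: connect already in progress\n", caller);
        return;
    }
    printf("caller %d: establishing transport\n", caller);
    /* ... socket setup would go here ... */
    atomic_flag_clear(&connecting);                /* release: next caller may try */
}

int main(void)
{
    atomic_flag_test_and_set(&connecting);  /* pretend another task holds the guard */
    try_connect(1);                         /* bails out */
    atomic_flag_clear(&connecting);
    try_connect(2);                         /* proceeds */
    return 0;
}
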
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index ad15a54806d..ba448c76016 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -71,7 +71,7 @@ void restore_processor_state(void);
struct saved_context;
void __save_processor_state(struct saved_context *ctxt);
void __restore_processor_state(struct saved_context *ctxt);
-extern unsigned long get_usable_page(unsigned gfp_mask);
+extern unsigned long get_usable_page(gfp_t gfp_mask);
extern void free_eaten_memory(void);
#endif /* _LINUX_SWSUSP_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a7bf1a3b149..20c975642ca 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -171,8 +171,8 @@ extern int rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
-extern int try_to_free_pages(struct zone **, unsigned int);
-extern int zone_reclaim(struct zone *, unsigned int, unsigned int);
+extern int try_to_free_pages(struct zone **, gfp_t);
+extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
extern int shrink_all_memory(int);
extern int vm_swappiness;
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 515046d1b2f..fc5bb4e91a5 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -40,7 +40,7 @@ struct ts_state
struct ts_ops
{
const char *name;
- struct ts_config * (*init)(const void *, unsigned int, int);
+ struct ts_config * (*init)(const void *, unsigned int, gfp_t);
unsigned int (*find)(struct ts_config *,
struct ts_state *);
void (*destroy)(struct ts_config *);
@@ -148,7 +148,7 @@ static inline unsigned int textsearch_get_pattern_len(struct ts_config *conf)
extern int textsearch_register(struct ts_ops *);
extern int textsearch_unregister(struct ts_ops *);
extern struct ts_config *textsearch_prepare(const char *, const void *,
- unsigned int, int, int);
+ unsigned int, gfp_t, int);
extern void textsearch_destroy(struct ts_config *conf);
extern unsigned int textsearch_find_continuous(struct ts_config *,
struct ts_state *,
diff --git a/include/linux/types.h b/include/linux/types.h
index 0aee34f9da9..21b9ce80364 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -151,7 +151,12 @@ typedef unsigned long sector_t;
*/
#ifdef __CHECKER__
-#define __bitwise __attribute__((bitwise))
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+#ifdef __CHECK_ENDIAN__
+#define __bitwise __bitwise__
#else
#define __bitwise
#endif
@@ -166,7 +171,7 @@ typedef __u64 __bitwise __be64;
#endif
#ifdef __KERNEL__
-typedef unsigned __nocast gfp_t;
+typedef unsigned __bitwise__ gfp_t;
#endif
struct ustat {
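
The switch from __nocast to __bitwise__ above turns gfp_t into a restricted type under sparse, so a bare integer passed where allocation flags are expected now draws a type warning while gcc still sees a plain unsigned int. The small out-of-tree sketch below shows what that buys; demo_alloc and the locally re-declared annotations stand in for the kernel's definitions.

#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define GFP_ATOMIC ((__force gfp_t)0x20u)

/* toy allocator front-end; only the flag typing matters here */
static void *demo_alloc(unsigned long size, gfp_t flags)
{
	(void)size;
	(void)flags;
	return 0;
}

int main(void)
{
	demo_alloc(128, GFP_ATOMIC);           /* ok: properly typed flag */
	demo_alloc(128, (__force gfp_t)0x20u); /* ok: explicit force cast */
	demo_alloc(128, 0x20);                 /* sparse: incorrect type in argument 2 */
	return 0;
}
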
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 4dbe580f933..8f731e8f282 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -933,17 +933,17 @@ static inline void usb_fill_int_urb (struct urb *urb,
}
extern void usb_init_urb(struct urb *urb);
-extern struct urb *usb_alloc_urb(int iso_packets, unsigned mem_flags);
+extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags);
extern void usb_free_urb(struct urb *urb);
#define usb_put_urb usb_free_urb
extern struct urb *usb_get_urb(struct urb *urb);
-extern int usb_submit_urb(struct urb *urb, unsigned mem_flags);
+extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
extern int usb_unlink_urb(struct urb *urb);
extern void usb_kill_urb(struct urb *urb);
#define HAVE_USB_BUFFERS
void *usb_buffer_alloc (struct usb_device *dev, size_t size,
- unsigned mem_flags, dma_addr_t *dma);
+ gfp_t mem_flags, dma_addr_t *dma);
void usb_buffer_free (struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
@@ -1050,7 +1050,7 @@ int usb_sg_init (
struct scatterlist *sg,
int nents,
size_t length,
- unsigned mem_flags
+ gfp_t mem_flags
);
void usb_sg_cancel (struct usb_sg_request *io);
void usb_sg_wait (struct usb_sg_request *io);
diff --git a/include/linux/usb_gadget.h b/include/linux/usb_gadget.h
index 71e60860732..ff81117eb73 100644
--- a/include/linux/usb_gadget.h
+++ b/include/linux/usb_gadget.h
@@ -107,18 +107,18 @@ struct usb_ep_ops {
int (*disable) (struct usb_ep *ep);
struct usb_request *(*alloc_request) (struct usb_ep *ep,
- unsigned gfp_flags);
+ gfp_t gfp_flags);
void (*free_request) (struct usb_ep *ep, struct usb_request *req);
void *(*alloc_buffer) (struct usb_ep *ep, unsigned bytes,
- dma_addr_t *dma, unsigned gfp_flags);
+ dma_addr_t *dma, gfp_t gfp_flags);
void (*free_buffer) (struct usb_ep *ep, void *buf, dma_addr_t dma,
unsigned bytes);
// NOTE: on 2.6, drivers may also use dma_map() and
// dma_sync_single_*() to directly manage dma overhead.
int (*queue) (struct usb_ep *ep, struct usb_request *req,
- unsigned gfp_flags);
+ gfp_t gfp_flags);
int (*dequeue) (struct usb_ep *ep, struct usb_request *req);
int (*set_halt) (struct usb_ep *ep, int value);
@@ -214,7 +214,7 @@ usb_ep_disable (struct usb_ep *ep)
* Returns the request, or null if one could not be allocated.
*/
static inline struct usb_request *
-usb_ep_alloc_request (struct usb_ep *ep, unsigned gfp_flags)
+usb_ep_alloc_request (struct usb_ep *ep, gfp_t gfp_flags)
{
return ep->ops->alloc_request (ep, gfp_flags);
}
@@ -254,7 +254,7 @@ usb_ep_free_request (struct usb_ep *ep, struct usb_request *req)
*/
static inline void *
usb_ep_alloc_buffer (struct usb_ep *ep, unsigned len, dma_addr_t *dma,
- unsigned gfp_flags)
+ gfp_t gfp_flags)
{
return ep->ops->alloc_buffer (ep, len, dma, gfp_flags);
}
@@ -330,7 +330,7 @@ usb_ep_free_buffer (struct usb_ep *ep, void *buf, dma_addr_t dma, unsigned len)
* reported when the usb peripheral is disconnected.
*/
static inline int
-usb_ep_queue (struct usb_ep *ep, struct usb_request *req, unsigned gfp_flags)
+usb_ep_queue (struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags)
{
return ep->ops->queue (ep, req, gfp_flags);
}
diff --git a/include/linux/wanpipe.h b/include/linux/wanpipe.h
index 167d956c492..dae9860091d 100644
--- a/include/linux/wanpipe.h
+++ b/include/linux/wanpipe.h
@@ -265,15 +265,6 @@ typedef struct {
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
-
-#define is_digit(ch) (((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')?1:0)
-#define is_alpha(ch) ((((ch)>=(unsigned)'a'&&(ch)<=(unsigned)'z')||\
- ((ch)>=(unsigned)'A'&&(ch)<=(unsigned)'Z'))?1:0)
-#define is_hex_digit(ch) ((((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')||\
- ((ch)>=(unsigned)'a'&&(ch)<=(unsigned)'f')||\
- ((ch)>=(unsigned)'A'&&(ch)<=(unsigned)'F'))?1:0)
-
-
/****** Data Structures *****************************************************/
/* Adapter Data Space.
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 9dbcd9e51c0..30bb4a89323 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -171,7 +171,7 @@ typedef struct {
ax25_address calls[AX25_MAX_DIGIS];
unsigned char repeated[AX25_MAX_DIGIS];
unsigned char ndigi;
- char lastrepeat;
+ signed char lastrepeat;
} ax25_digi;
typedef struct ax25_route {
diff --git a/include/net/dst.h b/include/net/dst.h
index 4a056a68243..6c196a5baf2 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -94,7 +94,6 @@ struct dst_ops
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, u32 mtu);
- int (*get_mss)(struct dst_entry *dst, u32 mtu);
int entry_size;
atomic_t entries;
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index dc36b1be674..5e38dca1d08 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -11,19 +11,26 @@
*
 * Adaptation to a generic IEEE 802.11 stack by James Ketrenos
* <jketreno@linux.intel.com>
- * Copyright (c) 2004, Intel Corporation
+ * Copyright (c) 2004-2005, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
+ *
+ * API Version History
+ * 1.0.x -- Initial version
+ * 1.1.x -- Added radiotap, QoS, TIM, ieee80211_geo APIs,
+ * various structure changes, and crypto API init method
*/
#ifndef IEEE80211_H
#define IEEE80211_H
-#include <linux/if_ether.h> /* ETH_ALEN */
-#include <linux/kernel.h> /* ARRAY_SIZE */
+#include <linux/if_ether.h> /* ETH_ALEN */
+#include <linux/kernel.h> /* ARRAY_SIZE */
#include <linux/wireless.h>
+#define IEEE80211_VERSION "git-1.1.6"
+
#define IEEE80211_DATA_LEN 2304
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6.2.1.1.2.
@@ -33,34 +40,13 @@
represents the 2304 bytes of real data, plus a possible 8 bytes of
WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
-
-#define IEEE80211_HLEN 30
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct ieee80211_hdr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
-} __attribute__ ((packed));
-
-struct ieee80211_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
-} __attribute__ ((packed));
-
#define IEEE80211_1ADDR_LEN 10
#define IEEE80211_2ADDR_LEN 16
#define IEEE80211_3ADDR_LEN 24
#define IEEE80211_4ADDR_LEN 30
#define IEEE80211_FCS_LEN 4
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
#define MIN_FRAG_THRESHOLD 256U
#define MAX_FRAG_THRESHOLD 2346U
@@ -113,11 +99,11 @@ struct ieee80211_hdr_3addr {
#define IEEE80211_STYPE_CFACK 0x0050
#define IEEE80211_STYPE_CFPOLL 0x0060
#define IEEE80211_STYPE_CFACKPOLL 0x0070
+#define IEEE80211_STYPE_QOS_DATA 0x0080
#define IEEE80211_SCTL_FRAG 0x000F
#define IEEE80211_SCTL_SEQ 0xFFF0
-
/* debug macros */
#ifdef CONFIG_IEEE80211_DEBUG
@@ -128,8 +114,7 @@ do { if (ieee80211_debug_level & (level)) \
in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
#else
#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
-#endif /* CONFIG_IEEE80211_DEBUG */
-
+#endif /* CONFIG_IEEE80211_DEBUG */
/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
@@ -140,7 +125,6 @@ do { if (ieee80211_debug_level & (level)) \
* messages. It should never be used for passing essid to user space. */
const char *escape_essid(const char *essid, u8 essid_len);
-
/*
* To use the debug system:
*
@@ -177,6 +161,7 @@ const char *escape_essid(const char *essid, u8 essid_len);
#define IEEE80211_DL_TX (1<<8)
#define IEEE80211_DL_RX (1<<9)
+#define IEEE80211_DL_QOS (1<<31)
#define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
#define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
@@ -190,9 +175,10 @@ const char *escape_essid(const char *essid, u8 essid_len);
#define IEEE80211_DEBUG_DROP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a)
#define IEEE80211_DEBUG_TX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a)
#define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a)
+#define IEEE80211_DEBUG_QOS(f, a...) IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a)
#include <linux/netdevice.h>
#include <linux/wireless.h>
-#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <linux/if_arp.h> /* ARPHRD_ETHER */
#ifndef WIRELESS_SPY
#define WIRELESS_SPY /* enable iwspy support */
@@ -200,10 +186,10 @@ const char *escape_essid(const char *essid, u8 essid_len);
#include <net/iw_handler.h> /* new driver API */
#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
-#endif /* ETH_P_PAE */
+#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#endif /* ETH_P_PAE */
-#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
+#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
#ifndef ETH_P_80211_RAW
#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
@@ -215,10 +201,10 @@ const char *escape_essid(const char *essid, u8 essid_len);
struct ieee80211_snap_hdr {
- u8 dsap; /* always 0xAA */
- u8 ssap; /* always 0xAA */
- u8 ctrl; /* always 0x03 */
- u8 oui[P80211_OUI_LEN]; /* organizational universal id */
+ u8 dsap; /* always 0xAA */
+ u8 ssap; /* always 0xAA */
+ u8 ctrl; /* always 0x03 */
+ u8 oui[P80211_OUI_LEN]; /* organizational universal id */
} __attribute__ ((packed));
@@ -246,8 +232,9 @@ struct ieee80211_snap_hdr {
#define WLAN_CAPABILITY_PBCC (1<<6)
#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8)
+#define WLAN_CAPABILITY_QOS (1<<9)
#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
-#define WLAN_CAPABILITY_OSSS_OFDM (1<<13)
+#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
/* Status codes */
enum ieee80211_statuscode {
@@ -312,14 +299,12 @@ enum ieee80211_reasoncode {
WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
};
-
#define IEEE80211_STATMASK_SIGNAL (1<<0)
#define IEEE80211_STATMASK_RSSI (1<<1)
#define IEEE80211_STATMASK_NOISE (1<<2)
#define IEEE80211_STATMASK_RATE (1<<3)
#define IEEE80211_STATMASK_WEMASK 0x7
-
#define IEEE80211_CCK_MODULATION (1<<0)
#define IEEE80211_OFDM_MODULATION (1<<1)
@@ -377,9 +362,6 @@ enum ieee80211_reasoncode {
#define IEEE80211_NUM_CCK_RATES 4
#define IEEE80211_OFDM_SHIFT_MASK_A 4
-
-
-
/* NOTE: This data is for statistical purposes; not all hardware provides this
* information for frames received. Not setting these will not cause
 * any adverse effects. */
@@ -388,7 +370,7 @@ struct ieee80211_rx_stats {
s8 rssi;
u8 signal;
u8 noise;
- u16 rate; /* in 100 kbps */
+ u16 rate; /* in 100 kbps */
u8 received_channel;
u8 control;
u8 mask;
@@ -439,38 +421,44 @@ struct ieee80211_device;
#include "ieee80211_crypt.h"
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
-
-#define SEC_LEVEL_0 0 /* None */
-#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
-#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
-#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
-#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
-
-#define WEP_KEYS 4
-#define WEP_KEY_LEN 13
+#define SEC_KEY_1 (1<<0)
+#define SEC_KEY_2 (1<<1)
+#define SEC_KEY_3 (1<<2)
+#define SEC_KEY_4 (1<<3)
+#define SEC_ACTIVE_KEY (1<<4)
+#define SEC_AUTH_MODE (1<<5)
+#define SEC_UNICAST_GROUP (1<<6)
+#define SEC_LEVEL (1<<7)
+#define SEC_ENABLED (1<<8)
+#define SEC_ENCRYPT (1<<9)
+
+#define SEC_LEVEL_0 0 /* None */
+#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
+#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
+#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
+#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
+
+#define SEC_ALG_NONE 0
+#define SEC_ALG_WEP 1
+#define SEC_ALG_TKIP 2
+#define SEC_ALG_CCMP 3
+
+#define WEP_KEYS 4
+#define WEP_KEY_LEN 13
+#define SCM_KEY_LEN 32
+#define SCM_TEMPORAL_KEY_LENGTH 16
struct ieee80211_security {
u16 active_key:2,
- enabled:1,
- auth_mode:2,
- auth_algo:4,
- unicast_uses_group:1;
+ enabled:1,
+ auth_mode:2, auth_algo:4, unicast_uses_group:1, encrypt:1;
+ u8 encode_alg[WEP_KEYS];
u8 key_sizes[WEP_KEYS];
- u8 keys[WEP_KEYS][WEP_KEY_LEN];
+ u8 keys[WEP_KEYS][SCM_KEY_LEN];
u8 level;
u16 flags;
} __attribute__ ((packed));
-
/*
802.11 data frame from AP
@@ -494,7 +482,7 @@ enum ieee80211_mfie {
MFIE_TYPE_RATES = 1,
MFIE_TYPE_FH_SET = 2,
MFIE_TYPE_DS_SET = 3,
- MFIE_TYPE_CF_SET = 4,
+ MFIE_TYPE_CF_SET = 4,
MFIE_TYPE_TIM = 5,
MFIE_TYPE_IBSS_SET = 6,
MFIE_TYPE_COUNTRY = 7,
@@ -516,11 +504,75 @@ enum ieee80211_mfie {
MFIE_TYPE_RSN = 48,
MFIE_TYPE_RATES_EX = 50,
MFIE_TYPE_GENERIC = 221,
+ MFIE_TYPE_QOS_PARAMETER = 222,
};
-struct ieee80211_info_element_hdr {
- u8 id;
- u8 len;
+/* Minimal header; can be used for passing 802.11 frames with sufficient
+ * information to determine what kind of frame is actually stored in the
+ * data that follows. */
+struct ieee80211_hdr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_1addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_2addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_3addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_4addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+ u8 payload[0];
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_3addrqos {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 payload[0];
+ __le16 qos_ctl;
+} __attribute__ ((packed));
+
+struct ieee80211_hdr_4addrqos {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+ u8 payload[0];
+ __le16 qos_ctl;
} __attribute__ ((packed));
struct ieee80211_info_element {
@@ -546,49 +598,77 @@ struct ieee80211_info_element {
u16 status;
*/
-struct ieee80211_authentication {
+struct ieee80211_auth {
struct ieee80211_hdr_3addr header;
__le16 algorithm;
__le16 transaction;
__le16 status;
- struct ieee80211_info_element info_element;
+ /* challenge */
+ struct ieee80211_info_element info_element[0];
} __attribute__ ((packed));
+struct ieee80211_disassoc {
+ struct ieee80211_hdr_3addr header;
+ __le16 reason;
+} __attribute__ ((packed));
+
+/* Alias deauth for disassoc */
+#define ieee80211_deauth ieee80211_disassoc
+
+struct ieee80211_probe_request {
+ struct ieee80211_hdr_3addr header;
+ /* SSID, supported rates */
+ struct ieee80211_info_element info_element[0];
+} __attribute__ ((packed));
struct ieee80211_probe_response {
struct ieee80211_hdr_3addr header;
u32 time_stamp[2];
__le16 beacon_interval;
__le16 capability;
- struct ieee80211_info_element info_element;
+ /* SSID, supported rates, FH params, DS params,
+ * CF params, IBSS params, TIM (if beacon), RSN */
+ struct ieee80211_info_element info_element[0];
} __attribute__ ((packed));
-struct ieee80211_assoc_request_frame {
+/* Alias beacon for probe_response */
+#define ieee80211_beacon ieee80211_probe_response
+
+struct ieee80211_assoc_request {
+ struct ieee80211_hdr_3addr header;
+ __le16 capability;
+ __le16 listen_interval;
+ /* SSID, supported rates, RSN */
+ struct ieee80211_info_element info_element[0];
+} __attribute__ ((packed));
+
+struct ieee80211_reassoc_request {
+ struct ieee80211_hdr_3addr header;
__le16 capability;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
- struct ieee80211_info_element info_element;
+ struct ieee80211_info_element info_element[0];
} __attribute__ ((packed));
-struct ieee80211_assoc_response_frame {
+struct ieee80211_assoc_response {
struct ieee80211_hdr_3addr header;
__le16 capability;
__le16 status;
__le16 aid;
- struct ieee80211_info_element info_element; /* supported rates */
+ /* supported rates */
+ struct ieee80211_info_element info_element[0];
} __attribute__ ((packed));
-
struct ieee80211_txb {
u8 nr_frags;
u8 encrypted;
- u16 reserved;
- u16 frag_size;
- u16 payload_size;
+ u8 rts_included;
+ u8 reserved;
+ __le16 frag_size;
+ __le16 payload_size;
struct sk_buff *fragments[0];
};
-
/* SWEEP TABLE ENTRIES NUMBER */
#define MAX_SWEEP_TAB_ENTRIES 42
#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
@@ -604,9 +684,68 @@ struct ieee80211_txb {
#define MAX_WPA_IE_LEN 64
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
+#define NETWORK_EMPTY_ESSID (1<<0)
+#define NETWORK_HAS_OFDM (1<<1)
+#define NETWORK_HAS_CCK (1<<2)
+
+/* QoS structure */
+#define NETWORK_HAS_QOS_PARAMETERS (1<<3)
+#define NETWORK_HAS_QOS_INFORMATION (1<<4)
+#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION)
+
+#define QOS_QUEUE_NUM 4
+#define QOS_OUI_LEN 3
+#define QOS_OUI_TYPE 2
+#define QOS_ELEMENT_ID 221
+#define QOS_OUI_INFO_SUB_TYPE 0
+#define QOS_OUI_PARAM_SUB_TYPE 1
+#define QOS_VERSION_1 1
+#define QOS_AIFSN_MIN_VALUE 2
+
+struct ieee80211_qos_information_element {
+ u8 elementID;
+ u8 length;
+ u8 qui[QOS_OUI_LEN];
+ u8 qui_type;
+ u8 qui_subtype;
+ u8 version;
+ u8 ac_info;
+} __attribute__ ((packed));
+
+struct ieee80211_qos_ac_parameter {
+ u8 aci_aifsn;
+ u8 ecw_min_max;
+ __le16 tx_op_limit;
+} __attribute__ ((packed));
+
+struct ieee80211_qos_parameter_info {
+ struct ieee80211_qos_information_element info_element;
+ u8 reserved;
+ struct ieee80211_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
+} __attribute__ ((packed));
+
+struct ieee80211_qos_parameters {
+ __le16 cw_min[QOS_QUEUE_NUM];
+ __le16 cw_max[QOS_QUEUE_NUM];
+ u8 aifs[QOS_QUEUE_NUM];
+ u8 flag[QOS_QUEUE_NUM];
+ __le16 tx_op_limit[QOS_QUEUE_NUM];
+} __attribute__ ((packed));
+
+struct ieee80211_qos_data {
+ struct ieee80211_qos_parameters parameters;
+ int active;
+ int supported;
+ u8 param_count;
+ u8 old_param_count;
+};
+
+struct ieee80211_tim_parameters {
+ u8 tim_count;
+ u8 tim_period;
+} __attribute__ ((packed));
+
+/*******************************************************/
struct ieee80211_network {
/* These entries are used to identify a unique network */
@@ -616,6 +755,8 @@ struct ieee80211_network {
u8 ssid[IW_ESSID_MAX_SIZE + 1];
u8 ssid_len;
+ struct ieee80211_qos_data qos_data;
+
/* These are network statistics */
struct ieee80211_rx_stats stats;
u16 capability;
@@ -631,10 +772,12 @@ struct ieee80211_network {
u16 beacon_interval;
u16 listen_interval;
u16 atim_window;
+ u8 erp_value;
u8 wpa_ie[MAX_WPA_IE_LEN];
size_t wpa_ie_len;
u8 rsn_ie[MAX_WPA_IE_LEN];
size_t rsn_ie_len;
+ struct ieee80211_tim_parameters tim;
struct list_head list;
};
@@ -651,17 +794,52 @@ enum ieee80211_state {
#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
#define DEFAULT_FTS 2346
-
#define CFG_IEEE80211_RESERVE_FCS (1<<0)
#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+#define CFG_IEEE80211_RTS (1<<2)
+
+#define IEEE80211_24GHZ_MIN_CHANNEL 1
+#define IEEE80211_24GHZ_MAX_CHANNEL 14
+#define IEEE80211_24GHZ_CHANNELS 14
+
+#define IEEE80211_52GHZ_MIN_CHANNEL 36
+#define IEEE80211_52GHZ_MAX_CHANNEL 165
+#define IEEE80211_52GHZ_CHANNELS 32
+
+enum {
+ IEEE80211_CH_PASSIVE_ONLY = (1 << 0),
+ IEEE80211_CH_B_ONLY = (1 << 2),
+ IEEE80211_CH_NO_IBSS = (1 << 3),
+ IEEE80211_CH_UNIFORM_SPREADING = (1 << 4),
+ IEEE80211_CH_RADAR_DETECT = (1 << 5),
+ IEEE80211_CH_INVALID = (1 << 6),
+};
+
+struct ieee80211_channel {
+ u32 freq;
+ u8 channel;
+ u8 flags;
+ u8 max_power;
+};
+
+struct ieee80211_geo {
+ u8 name[4];
+ u8 bg_channels;
+ u8 a_channels;
+ struct ieee80211_channel bg[IEEE80211_24GHZ_CHANNELS];
+ struct ieee80211_channel a[IEEE80211_52GHZ_CHANNELS];
+};
struct ieee80211_device {
struct net_device *dev;
+ struct ieee80211_security sec;
/* Bookkeeping structures */
struct net_device_stats stats;
struct ieee80211_stats ieee_stats;
+ struct ieee80211_geo geo;
+
/* Probe / Beacon management */
struct list_head network_free_list;
struct list_head network_list;
@@ -669,62 +847,102 @@ struct ieee80211_device {
int scans;
int scan_age;
- int iw_mode; /* operating mode (IW_MODE_*) */
+ int iw_mode; /* operating mode (IW_MODE_*) */
+ struct iw_spy_data spy_data; /* iwspy support */
spinlock_t lock;
- int tx_headroom; /* Set to size of any additional room needed at front
- * of allocated Tx SKBs */
+ int tx_headroom; /* Set to size of any additional room needed at front
+ * of allocated Tx SKBs */
u32 config;
/* WEP and other encryption related settings at the device level */
- int open_wep; /* Set to 1 to allow unencrypted frames */
+ int open_wep; /* Set to 1 to allow unencrypted frames */
- int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
+ int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
* WEP key changes */
/* If the host performs {en,de}cryption, then set to 1 */
int host_encrypt;
+ int host_encrypt_msdu;
int host_decrypt;
- int ieee802_1x; /* is IEEE 802.1X used */
+ /* host performs multicast decryption */
+ int host_mc_decrypt;
+
+ int host_open_frag;
+ int host_build_iv;
+ int ieee802_1x; /* is IEEE 802.1X used */
/* WPA data */
int wpa_enabled;
int drop_unencrypted;
- int tkip_countermeasures;
int privacy_invoked;
size_t wpa_ie_len;
u8 *wpa_ie;
struct list_head crypt_deinit_list;
struct ieee80211_crypt_data *crypt[WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
+ int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
struct timer_list crypt_deinit_timer;
+ int crypt_quiesced;
- int bcrx_sta_key; /* use individual keys to override default keys even
- * with RX of broad/multicast frames */
+ int bcrx_sta_key; /* use individual keys to override default keys even
+ * with RX of broad/multicast frames */
/* Fragmentation structures */
struct ieee80211_frag_entry frag_cache[IEEE80211_FRAG_CACHE_LEN];
unsigned int frag_next_idx;
- u16 fts; /* Fragmentation Threshold */
+ u16 fts; /* Fragmentation Threshold */
+ u16 rts; /* RTS threshold */
/* Association info */
u8 bssid[ETH_ALEN];
enum ieee80211_state state;
- int mode; /* A, B, G */
- int modulation; /* CCK, OFDM */
- int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */
- int abg_ture; /* ABG flag */
+ int mode; /* A, B, G */
+ int modulation; /* CCK, OFDM */
+ int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */
+ int abg_true; /* ABG flag */
+
+ int perfect_rssi;
+ int worst_rssi;
/* Callback functions */
- void (*set_security)(struct net_device *dev,
- struct ieee80211_security *sec);
- int (*hard_start_xmit)(struct ieee80211_txb *txb,
- struct net_device *dev);
- int (*reset_port)(struct net_device *dev);
+ void (*set_security) (struct net_device * dev,
+ struct ieee80211_security * sec);
+ int (*hard_start_xmit) (struct ieee80211_txb * txb,
+ struct net_device * dev, int pri);
+ int (*reset_port) (struct net_device * dev);
+ int (*is_queue_full) (struct net_device * dev, int pri);
+
+ int (*handle_management) (struct net_device * dev,
+ struct ieee80211_network * network, u16 type);
+
+ /* Typical STA methods */
+ int (*handle_auth) (struct net_device * dev,
+ struct ieee80211_auth * auth);
+ int (*handle_deauth) (struct net_device * dev,
+ struct ieee80211_auth * auth);
+ int (*handle_disassoc) (struct net_device * dev,
+ struct ieee80211_disassoc * assoc);
+ int (*handle_beacon) (struct net_device * dev,
+ struct ieee80211_beacon * beacon,
+ struct ieee80211_network * network);
+ int (*handle_probe_response) (struct net_device * dev,
+ struct ieee80211_probe_response * resp,
+ struct ieee80211_network * network);
+ int (*handle_probe_request) (struct net_device * dev,
+ struct ieee80211_probe_request * req,
+ struct ieee80211_rx_stats * stats);
+ int (*handle_assoc_response) (struct net_device * dev,
+ struct ieee80211_assoc_response * resp,
+ struct ieee80211_network * network);
+
+ /* Typical AP methods */
+ int (*handle_assoc_request) (struct net_device * dev);
+ int (*handle_reassoc_request) (struct net_device * dev,
+ struct ieee80211_reassoc_request * req);
/* This must be the last item so that it points to the data
* allocated beyond this structure by alloc_ieee80211 */
@@ -736,12 +954,12 @@ struct ieee80211_device {
#define IEEE_G (1<<2)
#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
-extern inline void *ieee80211_priv(struct net_device *dev)
+static inline void *ieee80211_priv(struct net_device *dev)
{
return ((struct ieee80211_device *)netdev_priv(dev))->priv;
}
-extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
+static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
{
/* Single white space is for Linksys APs */
if (essid_len == 1 && essid[0] == ' ')
@@ -757,7 +975,8 @@ extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
return 1;
}
-extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode)
+static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee,
+ int mode)
{
/*
* It is possible for both access points and our device to support
@@ -783,14 +1002,17 @@ extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mod
return 0;
}
-extern inline int ieee80211_get_hdrlen(u16 fc)
+static inline int ieee80211_get_hdrlen(u16 fc)
{
int hdrlen = IEEE80211_3ADDR_LEN;
+ u16 stype = WLAN_FC_GET_STYPE(fc);
switch (WLAN_FC_GET_TYPE(fc)) {
case IEEE80211_FTYPE_DATA:
if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
hdrlen = IEEE80211_4ADDR_LEN;
+ if (stype & IEEE80211_STYPE_QOS_DATA)
+ hdrlen += 2;
break;
case IEEE80211_FTYPE_CTL:
switch (WLAN_FC_GET_STYPE(fc)) {
@@ -808,7 +1030,48 @@ extern inline int ieee80211_get_hdrlen(u16 fc)
return hdrlen;
}
+static inline u8 *ieee80211_get_payload(struct ieee80211_hdr *hdr)
+{
+ switch (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) {
+ case IEEE80211_1ADDR_LEN:
+ return ((struct ieee80211_hdr_1addr *)hdr)->payload;
+ case IEEE80211_2ADDR_LEN:
+ return ((struct ieee80211_hdr_2addr *)hdr)->payload;
+ case IEEE80211_3ADDR_LEN:
+ return ((struct ieee80211_hdr_3addr *)hdr)->payload;
+ case IEEE80211_4ADDR_LEN:
+ return ((struct ieee80211_hdr_4addr *)hdr)->payload;
+ }
+
+	return NULL;
+}
+
+static inline int ieee80211_is_ofdm_rate(u8 rate)
+{
+ switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+ case IEEE80211_OFDM_RATE_6MB:
+ case IEEE80211_OFDM_RATE_9MB:
+ case IEEE80211_OFDM_RATE_12MB:
+ case IEEE80211_OFDM_RATE_18MB:
+ case IEEE80211_OFDM_RATE_24MB:
+ case IEEE80211_OFDM_RATE_36MB:
+ case IEEE80211_OFDM_RATE_48MB:
+ case IEEE80211_OFDM_RATE_54MB:
+ return 1;
+ }
+ return 0;
+}
+static inline int ieee80211_is_cck_rate(u8 rate)
+{
+ switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ return 1;
+ }
+ return 0;
+}
/* ieee80211.c */
extern void free_ieee80211(struct net_device *dev);
@@ -817,18 +1080,30 @@ extern struct net_device *alloc_ieee80211(int sizeof_priv);
extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
/* ieee80211_tx.c */
-extern int ieee80211_xmit(struct sk_buff *skb,
- struct net_device *dev);
+extern int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
-
+extern int ieee80211_tx_frame(struct ieee80211_device *ieee,
+ struct ieee80211_hdr *frame, int len);
/* ieee80211_rx.c */
extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats);
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
- struct ieee80211_hdr *header,
+ struct ieee80211_hdr_4addr *header,
struct ieee80211_rx_stats *stats);
+/* ieee80211_geo.c */
+extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device
+ *ieee);
+extern int ieee80211_set_geo(struct ieee80211_device *ieee,
+ const struct ieee80211_geo *geo);
+
+extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee,
+ u8 channel);
+extern int ieee80211_channel_to_index(struct ieee80211_device *ieee,
+ u8 channel);
+extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq);
+
/* ieee80211_wx.c */
extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
struct iw_request_info *info,
@@ -839,17 +1114,21 @@ extern int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *key);
-
-
-extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
+extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+
+static inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
{
ieee->scans++;
}
-extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
+static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
{
return ieee->scans;
}
-
-#endif /* IEEE80211_H */
+#endif /* IEEE80211_H */
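
Most header-length callers care about the reworked ieee80211_get_hdrlen() above: data frames are 24 bytes, grow to 30 when both ToDS and FromDS are set, and gain 2 bytes of QoS control when the new IEEE80211_STYPE_QOS_DATA subtype bit is present. The standalone sketch below exercises just that data-frame rule, restating the relevant frame-control constants locally (values per the 802.11 spec, not pulled from this header).

#include <stdio.h>
#include <stdint.h>

/* 802.11 frame-control fields, restated locally for a standalone build */
#define FCTL_FTYPE       0x000c
#define FCTL_STYPE       0x00f0
#define FCTL_TODS        0x0100
#define FCTL_FROMDS      0x0200
#define FTYPE_DATA       0x0008
#define STYPE_QOS_DATA   0x0080

/* data-frame portion of the ieee80211_get_hdrlen() rule */
static int demo_data_hdrlen(uint16_t fc)
{
	int hdrlen = 24;				/* 3-address header */

	if ((fc & FCTL_FTYPE) != FTYPE_DATA)
		return -1;				/* only data frames here */
	if ((fc & FCTL_TODS) && (fc & FCTL_FROMDS))
		hdrlen = 30;				/* 4-address (WDS) header */
	if ((fc & FCTL_STYPE) & STYPE_QOS_DATA)
		hdrlen += 2;				/* QoS control field */
	return hdrlen;
}

int main(void)
{
	printf("plain data frame:     %d bytes\n", demo_data_hdrlen(FTYPE_DATA));
	printf("QoS data frame, WDS:  %d bytes\n",
	       demo_data_hdrlen(FTYPE_DATA | STYPE_QOS_DATA |
				FCTL_TODS | FCTL_FROMDS));
	return 0;
}
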
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h
index b58a3bcc0dc..0a1c2d82ca4 100644
--- a/include/net/ieee80211_crypt.h
+++ b/include/net/ieee80211_crypt.h
@@ -25,16 +25,22 @@
#include <linux/skbuff.h>
+enum {
+ IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0),
+};
+
struct ieee80211_crypto_ops {
const char *name;
/* init new crypto context (e.g., allocate private data space,
* select IV, etc.); returns NULL on failure or pointer to allocated
* private data on success */
- void * (*init)(int keyidx);
+ void *(*init) (int keyidx);
/* deinitialize crypto context and free allocated private data */
- void (*deinit)(void *priv);
+ void (*deinit) (void *priv);
+
+ int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv);
/* encrypt/decrypt return < 0 on error or >= 0 on success. The return
* value from decrypt_mpdu is passed as the keyidx value for
@@ -42,34 +48,39 @@ struct ieee80211_crypto_ops {
* encryption; if not, error will be returned; these functions are
* called for all MPDUs (i.e., fragments).
*/
- int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
+ int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
+ int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
/* These functions are called for full MSDUs, i.e. full frames.
* These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
- void *priv);
+ int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv);
+ int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len,
+ void *priv);
- int (*set_key)(void *key, int len, u8 *seq, void *priv);
- int (*get_key)(void *key, int len, u8 *seq, void *priv);
+ int (*set_key) (void *key, int len, u8 * seq, void *priv);
+ int (*get_key) (void *key, int len, u8 * seq, void *priv);
/* procfs handler for printing out key information and possible
* statistics */
- char * (*print_stats)(char *p, void *priv);
+ char *(*print_stats) (char *p, void *priv);
+
+ /* Crypto specific flag get/set for configuration settings */
+ unsigned long (*get_flags) (void *priv);
+ unsigned long (*set_flags) (unsigned long flags, void *priv);
/* maximum number of bytes added by encryption; encrypt buf is
* allocated with extra_prefix_len bytes, copy of in_buf, and
* extra_postfix_len; encrypt need not use all this space, but
* the result must start at the beginning of the buffer and correct
* length must be returned */
- int extra_prefix_len, extra_postfix_len;
+ int extra_mpdu_prefix_len, extra_mpdu_postfix_len;
+ int extra_msdu_prefix_len, extra_msdu_postfix_len;
struct module *owner;
};
struct ieee80211_crypt_data {
- struct list_head list; /* delayed deletion list */
+ struct list_head list; /* delayed deletion list */
struct ieee80211_crypto_ops *ops;
void *priv;
atomic_t refcnt;
@@ -77,10 +88,11 @@ struct ieee80211_crypt_data {
int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
-struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name);
+struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
void ieee80211_crypt_deinit_handler(unsigned long);
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
struct ieee80211_crypt_data **crypt);
+void ieee80211_crypt_quiescing(struct ieee80211_device *ieee);
#endif
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
new file mode 100644
index 00000000000..429b73892a5
--- /dev/null
+++ b/include/net/ieee80211_radiotap.h
@@ -0,0 +1,231 @@
+/* $FreeBSD: src/sys/net80211/ieee80211_radiotap.h,v 1.5 2005/01/22 20:12:05 sam Exp $ */
+/* $NetBSD: ieee80211_radiotap.h,v 1.11 2005/06/22 06:16:02 dyoung Exp $ */
+
+/*-
+ * Copyright (c) 2003, 2004 David Young. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of David Young may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID
+ * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+
+/*
+ * Modifications to fit into the linux IEEE 802.11 stack,
+ * Mike Kershaw (dragorn@kismetwireless.net)
+ */
+
+#ifndef IEEE80211RADIOTAP_H
+#define IEEE80211RADIOTAP_H
+
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+
+/* Radiotap header version (from official NetBSD feed) */
+#define IEEE80211RADIOTAP_VERSION "1.5"
+/* Base version of the radiotap packet header data */
+#define PKTHDR_RADIOTAP_VERSION 0
+
+/* A generic radio capture format is desirable. There is one for
+ * Linux, but it is neither rigidly defined (there were not even
+ * units given for some fields) nor easily extensible.
+ *
+ * I suggest the following extensible radio capture format. It is
+ * based on a bitmap indicating which fields are present.
+ *
+ * I am trying to describe precisely what the application programmer
+ * should expect in the following, and for that reason I tell the
+ * units and origin of each measurement (where it applies), or else I
+ * use sufficiently weaselly language ("is a monotonically nondecreasing
+ * function of...") that I cannot set false expectations for lawyerly
+ * readers.
+ */
+
+/* XXX tcpdump/libpcap do not tolerate variable-length headers,
+ * yet, so we pad every radiotap header to 64 bytes. Ugh.
+ */
+#define IEEE80211_RADIOTAP_HDRLEN 64
+
+/* The radio capture header precedes the 802.11 header. */
+struct ieee80211_radiotap_header {
+ u8 it_version; /* Version 0. Only increases
+ * for drastic changes,
+ * introduction of compatible
+ * new fields does not count.
+ */
+ u8 it_pad;
+ u16 it_len; /* length of the whole
+ * header in bytes, including
+ * it_version, it_pad,
+ * it_len, and data fields.
+ */
+ u32 it_present; /* A bitmap telling which
+ * fields are present. Set bit 31
+ * (0x80000000) to extend the
+ * bitmap by another 32 bits.
+ * Additional extensions are made
+ * by setting bit 31.
+ */
+};
+
+/* Name Data type Units
+ * ---- --------- -----
+ *
+ * IEEE80211_RADIOTAP_TSFT u64 microseconds
+ *
+ * Value in microseconds of the MAC's 64-bit 802.11 Time
+ * Synchronization Function timer when the first bit of the
+ * MPDU arrived at the MAC. For received frames, only.
+ *
+ * IEEE80211_RADIOTAP_CHANNEL 2 x u16 MHz, bitmap
+ *
+ * Tx/Rx frequency in MHz, followed by flags (see below).
+ *
+ * IEEE80211_RADIOTAP_FHSS u16 see below
+ *
+ * For frequency-hopping radios, the hop set (first byte)
+ * and pattern (second byte).
+ *
+ * IEEE80211_RADIOTAP_RATE u8 500kb/s
+ *
+ * Tx/Rx data rate
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTSIGNAL int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * RF signal power at the antenna, decibel difference from
+ * one milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTNOISE int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * RF noise power at the antenna, decibel difference from one
+ * milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTSIGNAL u8 decibel (dB)
+ *
+ * RF signal power at the antenna, decibel difference from an
+ * arbitrary, fixed reference.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTNOISE u8 decibel (dB)
+ *
+ * RF noise power at the antenna, decibel difference from an
+ * arbitrary, fixed reference point.
+ *
+ * IEEE80211_RADIOTAP_LOCK_QUALITY u16 unitless
+ *
+ * Quality of Barker code lock. Unitless. Monotonically
+ * nondecreasing with "better" lock strength. Called "Signal
+ * Quality" in datasheets. (Is there a standard way to measure
+ * this?)
+ *
+ * IEEE80211_RADIOTAP_TX_ATTENUATION u16 unitless
+ *
+ * Transmit power expressed as unitless distance from max
+ * power set at factory calibration. 0 is max power.
+ * Monotonically nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DB_TX_ATTENUATION u16 decibels (dB)
+ *
+ * Transmit power expressed as decibel distance from max power
+ * set at factory calibration. 0 is max power. Monotonically
+ * nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DBM_TX_POWER int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * Transmit power expressed as dBm (decibels from a 1 milliwatt
+ * reference). This is the absolute power level measured at
+ * the antenna port.
+ *
+ * IEEE80211_RADIOTAP_FLAGS u8 bitmap
+ *
+ * Properties of transmitted and received frames. See flags
+ * defined below.
+ *
+ * IEEE80211_RADIOTAP_ANTENNA u8 antenna index
+ *
+ * Unitless indication of the Rx/Tx antenna for this packet.
+ * The first antenna is antenna 0.
+ *
+ * IEEE80211_RADIOTAP_FCS u32 data
+ *
+ * FCS from frame in network byte order.
+ */
+enum ieee80211_radiotap_type {
+ IEEE80211_RADIOTAP_TSFT = 0,
+ IEEE80211_RADIOTAP_FLAGS = 1,
+ IEEE80211_RADIOTAP_RATE = 2,
+ IEEE80211_RADIOTAP_CHANNEL = 3,
+ IEEE80211_RADIOTAP_FHSS = 4,
+ IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5,
+ IEEE80211_RADIOTAP_DBM_ANTNOISE = 6,
+ IEEE80211_RADIOTAP_LOCK_QUALITY = 7,
+ IEEE80211_RADIOTAP_TX_ATTENUATION = 8,
+ IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9,
+ IEEE80211_RADIOTAP_DBM_TX_POWER = 10,
+ IEEE80211_RADIOTAP_ANTENNA = 11,
+ IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12,
+ IEEE80211_RADIOTAP_DB_ANTNOISE = 13,
+ IEEE80211_RADIOTAP_EXT = 31,
+};
+
+/* Channel flags. */
+#define IEEE80211_CHAN_TURBO 0x0010 /* Turbo channel */
+#define IEEE80211_CHAN_CCK 0x0020 /* CCK channel */
+#define IEEE80211_CHAN_OFDM 0x0040 /* OFDM channel */
+#define IEEE80211_CHAN_2GHZ 0x0080 /* 2 GHz spectrum channel. */
+#define IEEE80211_CHAN_5GHZ 0x0100 /* 5 GHz spectrum channel */
+#define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */
+#define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */
+#define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */
+
+/* For IEEE80211_RADIOTAP_FLAGS */
+#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received
+ * during CFP
+ */
+#define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received
+ * with short
+ * preamble
+ */
+#define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received
+ * with WEP encryption
+ */
+#define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received
+ * with fragmentation
+ */
+#define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */
+#define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between
+ * 802.11 header and payload
+ * (to 32-bit boundary)
+ */
+
+/* Ugly macro to convert literal channel numbers into their MHz equivalents
+ * There are certainly some conditions that will break this (like feeding it '30')
+ * but they shouldn't arise since nothing talks on channel 30. */
+#define ieee80211chan2mhz(x) \
+ (((x) <= 14) ? \
+ (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
+ ((x) + 1000) * 5)
+
+#endif /* IEEE80211_RADIOTAP_H */
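
As a quick sanity check of the ieee80211chan2mhz() macro at the end of the new header, the macro can be repeated verbatim in a tiny standalone program and run against a few well-known channels; this is only a test harness, not kernel code.

#include <stdio.h>

/* copied verbatim from ieee80211_radiotap.h so it builds standalone */
#define ieee80211chan2mhz(x) \
	(((x) <= 14) ? \
	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
	((x) + 1000) * 5)

int main(void)
{
	static const int channels[] = { 1, 6, 13, 14, 36, 165 };
	unsigned int i;

	for (i = 0; i < sizeof(channels) / sizeof(channels[0]); i++)
		printf("channel %3d -> %d MHz\n",
		       channels[i], ieee80211chan2mhz(channels[i]));
	return 0;
}
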
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index f45c37d89cf..c7a959428b4 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -254,8 +254,10 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
{
if (skb->protocol == ntohs(ETH_P_802_2))
memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
- else if (skb->protocol == ntohs(ETH_P_TR_802_2))
+ else if (skb->protocol == ntohs(ETH_P_TR_802_2)) {
memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN);
+ *sa &= 0x7F;
+ }
}
/**
diff --git a/include/net/sock.h b/include/net/sock.h
index ecb75526cba..e0498bd3600 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -207,7 +207,7 @@ struct sock {
struct sk_buff_head sk_write_queue;
int sk_wmem_queued;
int sk_forward_alloc;
- unsigned int sk_allocation;
+ gfp_t sk_allocation;
int sk_sndbuf;
int sk_route_caps;
unsigned long sk_flags;
diff --git a/include/net/syncppp.h b/include/net/syncppp.h
index 614cb6ba564..877efa43470 100644
--- a/include/net/syncppp.h
+++ b/include/net/syncppp.h
@@ -86,7 +86,6 @@ static inline struct sppp *sppp_of(struct net_device *dev)
void sppp_attach (struct ppp_device *pd);
void sppp_detach (struct net_device *dev);
-void sppp_input (struct net_device *dev, struct sk_buff *m);
int sppp_do_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
struct sk_buff *sppp_dequeue (struct net_device *dev);
int sppp_isempty (struct net_device *dev);
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index bed4b7c9be9..e6b61fab66d 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -146,7 +146,7 @@ struct scsi_cmnd {
#define SCSI_STATE_MLQUEUE 0x100b
-extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, int);
+extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
extern void scsi_put_command(struct scsi_cmnd *);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
index 6a140020d7c..2539debb799 100644
--- a/include/scsi/scsi_request.h
+++ b/include/scsi/scsi_request.h
@@ -45,7 +45,7 @@ struct scsi_request {
level driver) of this request */
};
-extern struct scsi_request *scsi_allocate_request(struct scsi_device *, int);
+extern struct scsi_request *scsi_allocate_request(struct scsi_device *, gfp_t);
extern void scsi_release_request(struct scsi_request *);
extern void scsi_wait_req(struct scsi_request *, const void *cmnd,
void *buffer, unsigned bufflen,
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 3a2fd2cc9f1..83489c3abba 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -111,7 +111,7 @@ size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id);
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id);
/* basic memory allocation functions */
-void *snd_malloc_pages(size_t size, unsigned int gfp_flags);
+void *snd_malloc_pages(size_t size, gfp_t gfp_flags);
void snd_free_pages(void *ptr, size_t size);
#endif /* __SOUND_MEMALLOC_H */
diff --git a/kernel/audit.c b/kernel/audit.c
index aefa73a8a58..0c56320d38d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -133,7 +133,7 @@ struct audit_buffer {
struct list_head list;
struct sk_buff *skb; /* formatted skb ready to send */
struct audit_context *ctx; /* NULL or associated context */
- int gfp_mask;
+ gfp_t gfp_mask;
};
static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
@@ -647,7 +647,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
* will be written at syscall exit. If there is no associated task, tsk
* should be NULL. */
-struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask,
+struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
int type)
{
struct audit_buffer *ab = NULL;
@@ -879,7 +879,7 @@ void audit_log_end(struct audit_buffer *ab)
/* Log an audit record. This is a convenience function that calls
* audit_log_start, audit_log_vformat, and audit_log_end. It may be
* called in any context. */
-void audit_log(struct audit_context *ctx, int gfp_mask, int type,
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
const char *fmt, ...)
{
struct audit_buffer *ab;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 88696f639aa..d8a68509e72 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -803,7 +803,7 @@ static void audit_log_task_info(struct audit_buffer *ab)
up_read(&mm->mmap_sem);
}
-static void audit_log_exit(struct audit_context *context, unsigned int gfp_mask)
+static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
{
int i;
struct audit_buffer *ab;
diff --git a/kernel/exit.c b/kernel/exit.c
index 43077732619..3b25b182d2b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -843,6 +843,7 @@ fastcall NORET_TYPE void do_exit(long code)
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
del_timer_sync(&tsk->signal->real_timer);
+ exit_itimers(tsk->signal);
acct_process(code);
}
exit_mm(tsk);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index cdd4dcd8fb6..36c5d9cd4cc 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -90,7 +90,7 @@ int kexec_should_crash(struct task_struct *p)
static int kimage_is_destination_range(struct kimage *image,
unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
- unsigned int gfp_mask,
+ gfp_t gfp_mask,
unsigned long dest);
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
@@ -326,8 +326,7 @@ static int kimage_is_destination_range(struct kimage *image,
return 0;
}
-static struct page *kimage_alloc_pages(unsigned int gfp_mask,
- unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
struct page *pages;
@@ -654,7 +653,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
}
static struct page *kimage_alloc_page(struct kimage *image,
- unsigned int gfp_mask,
+ gfp_t gfp_mask,
unsigned long destination)
{
/*
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index b3f3edc475d..bf374fceb39 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
union cpu_time_count now)
{
int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
for (i = 0; incr < delta - incr; i++)
incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
- if (delta <= incr)
+ if (delta < incr)
continue;
timer->it.cpu.expires.sched += incr;
timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
incr = cputime_add(incr, incr);
for (; i >= 0; incr = cputime_halve(incr), i--) {
- if (cputime_le(delta, incr))
+ if (cputime_lt(delta, incr))
continue;
timer->it.cpu.expires.cpu =
cputime_add(timer->it.cpu.expires.cpu, incr);
@@ -380,28 +380,31 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
int posix_cpu_timer_del(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
+ int ret = 0;
- if (timer->it.cpu.firing)
- return TIMER_RETRY;
-
- if (unlikely(p == NULL))
- return 0;
+ if (likely(p != NULL)) {
+ read_lock(&tasklist_lock);
+ if (unlikely(p->signal == NULL)) {
+ /*
+ * We raced with the reaping of the task.
+ * The deletion should have cleared us off the list.
+ */
+ BUG_ON(!list_empty(&timer->it.cpu.entry));
+ } else {
+ spin_lock(&p->sighand->siglock);
+ if (timer->it.cpu.firing)
+ ret = TIMER_RETRY;
+ else
+ list_del(&timer->it.cpu.entry);
+ spin_unlock(&p->sighand->siglock);
+ }
+ read_unlock(&tasklist_lock);
- spin_lock(&p->sighand->siglock);
- if (!list_empty(&timer->it.cpu.entry)) {
- /*
- * Take us off the task's timer list. We don't need to
- * take tasklist_lock and check for the task being reaped.
- * If it was reaped, it already called posix_cpu_timers_exit
- * and posix_cpu_timers_exit_group to clear all the timers
- * that pointed to it.
- */
- list_del(&timer->it.cpu.entry);
- put_task_struct(p);
+ if (!ret)
+ put_task_struct(p);
}
- spin_unlock(&p->sighand->siglock);
- return 0;
+ return ret;
}
/*
@@ -418,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
cputime_t ptime = cputime_add(utime, stime);
list_for_each_entry_safe(timer, next, head, entry) {
- put_task_struct(timer->task);
- timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, ptime)) {
timer->expires.cpu = cputime_zero;
@@ -431,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
++head;
list_for_each_entry_safe(timer, next, head, entry) {
- put_task_struct(timer->task);
- timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, utime)) {
timer->expires.cpu = cputime_zero;
@@ -444,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
++head;
list_for_each_entry_safe(timer, next, head, entry) {
- put_task_struct(timer->task);
- timer->task = NULL;
list_del_init(&timer->entry);
if (timer->expires.sched < sched_time) {
timer->expires.sched = 0;
@@ -489,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
struct task_struct *t = p;
unsigned int nthreads = atomic_read(&p->signal->live);
+ if (!nthreads)
+ return;
+
switch (clock_idx) {
default:
BUG();
@@ -497,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
left = cputime_div(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ticks = cputime_add(prof_ticks(t), left);
if (cputime_eq(t->it_prof_expires,
cputime_zero) ||
@@ -512,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
left = cputime_div(cputime_sub(expires.cpu, val.cpu),
nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ticks = cputime_add(virt_ticks(t), left);
if (cputime_eq(t->it_virt_expires,
cputime_zero) ||
@@ -527,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
nsleft = expires.sched - val.sched;
do_div(nsleft, nthreads);
do {
- if (!unlikely(t->exit_state)) {
+ if (!unlikely(t->flags & PF_EXITING)) {
ns = t->sched_time + nsleft;
if (t->it_sched_expires == 0 ||
t->it_sched_expires > ns) {
@@ -566,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
struct cpu_timer_list *next;
unsigned long i;
+ if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
+ return;
+
head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
p->cpu_timers : p->signal->cpu_timers);
head += CPUCLOCK_WHICH(timer->it_clock);
@@ -576,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
listpos = head;
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
list_for_each_entry(next, head, entry) {
- if (next->expires.sched > nt->expires.sched) {
- listpos = &next->entry;
+ if (next->expires.sched > nt->expires.sched)
break;
- }
+ listpos = &next->entry;
}
} else {
list_for_each_entry(next, head, entry) {
- if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
- listpos = &next->entry;
+ if (cputime_gt(next->expires.cpu, nt->expires.cpu))
break;
- }
+ listpos = &next->entry;
}
}
list_add(&nt->entry, listpos);
@@ -730,9 +731,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
* Disarm any old timer after extracting its expiry time.
*/
BUG_ON(!irqs_disabled());
+
+ ret = 0;
spin_lock(&p->sighand->siglock);
old_expires = timer->it.cpu.expires;
- list_del_init(&timer->it.cpu.entry);
+ if (unlikely(timer->it.cpu.firing)) {
+ timer->it.cpu.firing = -1;
+ ret = TIMER_RETRY;
+ } else
+ list_del_init(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
/*
@@ -780,7 +787,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
}
}
- if (unlikely(timer->it.cpu.firing)) {
+ if (unlikely(ret)) {
/*
* We are colliding with the timer actually firing.
* Punt after filling in the timer's old value, and
@@ -788,8 +795,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
* it as an overrun (thanks to bump_cpu_timer above).
*/
read_unlock(&tasklist_lock);
- timer->it.cpu.firing = -1;
- ret = TIMER_RETRY;
goto out;
}
@@ -955,14 +960,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
static void check_thread_timers(struct task_struct *tsk,
struct list_head *firing)
{
+ int maxfire;
struct list_head *timers = tsk->cpu_timers;
+ maxfire = 20;
tsk->it_prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
tsk->it_prof_expires = t->expires.cpu;
break;
}
@@ -971,12 +978,13 @@ static void check_thread_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
tsk->it_virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
tsk->it_virt_expires = t->expires.cpu;
break;
}
@@ -985,12 +993,13 @@ static void check_thread_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
tsk->it_sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (tsk->sched_time < t->expires.sched) {
+ if (!--maxfire || tsk->sched_time < t->expires.sched) {
tsk->it_sched_expires = t->expires.sched;
break;
}
@@ -1007,6 +1016,7 @@ static void check_thread_timers(struct task_struct *tsk,
static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
+ int maxfire;
struct signal_struct *const sig = tsk->signal;
cputime_t utime, stime, ptime, virt_expires, prof_expires;
unsigned long long sched_time, sched_expires;
@@ -1039,12 +1049,13 @@ static void check_process_timers(struct task_struct *tsk,
} while (t != tsk);
ptime = cputime_add(utime, stime);
+ maxfire = 20;
prof_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(ptime, t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
prof_expires = t->expires.cpu;
break;
}
@@ -1053,12 +1064,13 @@ static void check_process_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
virt_expires = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (cputime_lt(utime, t->expires.cpu)) {
+ if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
virt_expires = t->expires.cpu;
break;
}
@@ -1067,12 +1079,13 @@ static void check_process_timers(struct task_struct *tsk,
}
++timers;
+ maxfire = 20;
sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_entry(timers->next,
struct cpu_timer_list,
entry);
- if (sched_time < t->expires.sched) {
+ if (!--maxfire || sched_time < t->expires.sched) {
sched_expires = t->expires.sched;
break;
}
@@ -1155,6 +1168,9 @@ static void check_process_timers(struct task_struct *tsk,
unsigned long long sched_left, sched;
const unsigned int nthreads = atomic_read(&sig->live);
+ if (!nthreads)
+ return;
+
prof_left = cputime_sub(prof_expires, utime);
prof_left = cputime_sub(prof_left, stime);
prof_left = cputime_div(prof_left, nthreads);
@@ -1191,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
do {
t = next_thread(t);
- } while (unlikely(t->exit_state));
+ } while (unlikely(t->flags & PF_EXITING));
} while (t != tsk);
}
}
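
The check_thread_timers() and check_process_timers() hunks above bound each expiry walk: the "!--maxfire || ..." test stops the loop either once 20 timers have been handled in a single pass or at the first entry that has not expired yet, so a pathological number of armed timers can no longer stall the tick. A minimal userspace sketch of the same bounding idiom; the array and timestamps are invented, only the budget of 20 mirrors the patch:

        /* Bounded expiry walk: stop after the per-pass budget is spent or at
         * the first entry that has not expired yet (the list stays sorted). */
        #include <stdio.h>

        int main(void)
        {
                unsigned long now = 100;
                unsigned long expires[] = { 10, 20, 30, 40, 150, 200 };  /* sorted */
                int n = sizeof(expires) / sizeof(expires[0]);
                int maxfire = 20;                      /* budget, as in the patch */
                int i;

                for (i = 0; i < n; i++) {
                        if (!--maxfire || now < expires[i]) {
                                printf("next pending expiry: %lu\n", expires[i]);
                                break;
                        }
                        printf("firing timer that expired at %lu\n", expires[i]);
                }
                return 0;
        }
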
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index b7b532acd9f..dda3cda73c7 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -1157,7 +1157,7 @@ retry_delete:
}
/*
- * This is called by __exit_signal, only when there are no more
+ * This is called by do_exit or de_thread, only when there are no more
* references to the shared signal_struct.
*/
void exit_itimers(struct signal_struct *sig)
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 2d5c4567644..10bc5ec496d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1095,7 +1095,7 @@ static inline void eat_page(void *page)
*eaten_memory = c;
}
-unsigned long get_usable_page(unsigned gfp_mask)
+unsigned long get_usable_page(gfp_t gfp_mask)
{
unsigned long m;
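
This hunk is one of many below (lib/idr.c, lib/kobject.c, lib/kobject_uevent.c, lib/textsearch.c, the mm/ files, net/core/sock.c, ...) that convert allocation-flag parameters from int or unsigned int to gfp_t. The point of the dedicated type is that sparse can then flag code that mixes a gfp mask with an ordinary integer. A rough illustration of the idea; the __bitwise/__force definitions below are assumptions about how such a checked typedef is declared, not text from this patch:

        /* Compiles cleanly with gcc; under sparse (make C=1 in the kernel,
         * or sparse directly here) the bitwise typedef makes accidental
         * int/gfp_t mixing visible. */
        #ifdef __CHECKER__
        #define __bitwise       __attribute__((bitwise))
        #define __force         __attribute__((force))
        #else
        #define __bitwise
        #define __force
        #endif

        typedef unsigned int __bitwise gfp_t;

        #define MY_GFP_KERNEL   ((__force gfp_t)0x10u)  /* illustrative value */

        static void *my_alloc(gfp_t gfp_mask)
        {
                (void)gfp_mask;
                return 0;
        }

        int main(void)
        {
                void *p = my_alloc(MY_GFP_KERNEL);      /* clean under sparse */
                /* my_alloc(16) -- passing a bare int here would draw a warning */
                return p != 0;
        }
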
diff --git a/kernel/sched.c b/kernel/sched.c
index 1f31a528fdb..1e5cafdf4e2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3879,6 +3879,7 @@ EXPORT_SYMBOL(cpu_present_map);
#ifndef CONFIG_SMP
cpumask_t cpu_online_map = CPU_MASK_ALL;
+EXPORT_SYMBOL_GPL(cpu_online_map);
cpumask_t cpu_possible_map = CPU_MASK_ALL;
#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 50c99264377..f2b96b08fb4 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -397,20 +397,8 @@ void __exit_signal(struct task_struct *tsk)
flush_sigqueue(&tsk->pending);
if (sig) {
/*
- * We are cleaning up the signal_struct here. We delayed
- * calling exit_itimers until after flush_sigqueue, just in
- * case our thread-local pending queue contained a queued
- * timer signal that would have been cleared in
- * exit_itimers. When that called sigqueue_free, it would
- * attempt to re-take the tasklist_lock and deadlock. This
- * can never happen if we ensure that all queues the
- * timer's signal might be queued on have been flushed
- * first. The shared_pending queue, and our own pending
- * queue are the only queues the timer could be on, since
- * there are no other threads left in the group and timer
- * signals are constrained to threads inside the group.
+ * We are cleaning up the signal_struct here.
*/
- exit_itimers(sig);
exit_thread_group_keys(sig);
kmem_cache_free(signal_cachep, sig);
}
diff --git a/lib/idr.c b/lib/idr.c
index 6415d053e2b..6414b2fb482 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -72,7 +72,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
* If the system is REALLY out of memory this function returns 0,
* otherwise 1.
*/
-int idr_pre_get(struct idr *idp, unsigned gfp_mask)
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < IDR_FREE_MAX) {
struct idr_layer *new;
@@ -346,6 +346,19 @@ void idr_remove(struct idr *idp, int id)
EXPORT_SYMBOL(idr_remove);
/**
+ * idr_destroy - release all cached layers within an idr tree
+ * idp: idr handle
+ */
+void idr_destroy(struct idr *idp)
+{
+ while (idp->id_free_cnt) {
+ struct idr_layer *p = alloc_layer(idp);
+ kmem_cache_free(idr_layer_cache, p);
+ }
+}
+EXPORT_SYMBOL(idr_destroy);
+
+/**
* idr_find - return pointer for given id
* @idp: idr handle
* @id: lookup key
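
The idr_destroy() added above only releases the layers cached by idr_pre_get(); live IDs still have to be removed first. A hedged sketch of how the calls fit together against the 2.6-era idr interface; the object pointer and the wrapper function names are invented for illustration:

        #include <linux/idr.h>
        #include <linux/errno.h>
        #include <linux/gfp.h>

        static struct idr my_idr;

        static void my_setup(void)
        {
                idr_init(&my_idr);
        }

        static int my_store(void *obj, int *out_id)
        {
                if (!idr_pre_get(&my_idr, GFP_KERNEL))  /* preload layer cache */
                        return -ENOMEM;

                return idr_get_new(&my_idr, obj, out_id); /* may be -EAGAIN */
        }

        static void my_teardown(int id)
        {
                idr_remove(&my_idr, id);        /* drop live IDs first */
                idr_destroy(&my_idr);           /* then free the cached layers */
        }
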
diff --git a/lib/kobject.c b/lib/kobject.c
index dd0917dd9fa..253d3004ace 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -100,7 +100,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
* @kobj: kobject in question, with which to build the path
* @gfp_mask: the allocation type used to allocate the path
*/
-char *kobject_get_path(struct kobject *kobj, int gfp_mask)
+char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
{
char *path;
int len;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 04ca4429ddf..7ef6f6a17aa 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -62,7 +62,7 @@ static struct sock *uevent_sock;
* @gfp_mask:
*/
static int send_uevent(const char *signal, const char *obj,
- char **envp, int gfp_mask)
+ char **envp, gfp_t gfp_mask)
{
struct sk_buff *skb;
char *pos;
@@ -98,7 +98,7 @@ static int send_uevent(const char *signal, const char *obj,
}
static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action,
- struct attribute *attr, int gfp_mask)
+ struct attribute *attr, gfp_t gfp_mask)
{
char *path;
char *attrpath;
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 1e934c196f0..6f3093efbd7 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -254,7 +254,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
* parameters or a ERR_PTR().
*/
struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
- unsigned int len, int gfp_mask, int flags)
+ unsigned int len, gfp_t gfp_mask, int flags)
{
int err = -ENOENT;
struct ts_config *conf;
diff --git a/mm/filemap.c b/mm/filemap.c
index b5346576e58..1c31b2fd2ca 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
* This function does not add the page to the LRU. The caller must do that.
*/
int add_to_page_cache(struct page *page, struct address_space *mapping,
- pgoff_t offset, int gfp_mask)
+ pgoff_t offset, gfp_t gfp_mask)
{
int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
@@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
EXPORT_SYMBOL(add_to_page_cache);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t offset, int gfp_mask)
+ pgoff_t offset, gfp_t gfp_mask)
{
int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
if (ret == 0)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
* memory exhaustion.
*/
struct page *find_or_create_page(struct address_space *mapping,
- unsigned long index, unsigned int gfp_mask)
+ unsigned long index, gfp_t gfp_mask)
{
struct page *page, *cached_page = NULL;
int err;
@@ -683,7 +683,7 @@ struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
struct page *page = find_get_page(mapping, index);
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
if (page) {
if (!TestSetPageLocked(page))
diff --git a/mm/highmem.c b/mm/highmem.c
index 90e1861e2da..ce2e7e8bbfa 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,11 +30,9 @@
static mempool_t *page_pool, *isa_page_pool;
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
{
- unsigned int gfp = gfp_mask | (unsigned int) (long) data;
-
- return alloc_page(gfp);
+ return alloc_page(gfp_mask | GFP_DMA);
}
static void page_pool_free(void *page, void *data)
@@ -51,6 +49,12 @@ static void page_pool_free(void *page, void *data)
* n means that there are (n-1) current users of it.
*/
#ifdef CONFIG_HIGHMEM
+
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+{
+ return alloc_page(gfp_mask);
+}
+
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -267,7 +271,7 @@ int init_emergency_isa_pool(void)
if (isa_page_pool)
return 0;
- isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
+ isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
if (!isa_page_pool)
BUG();
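
The highmem.c hunk stops smuggling __GFP_DMA through the mempool's opaque data pointer and gives the ISA pool a dedicated allocator callback instead. A sketch of the same shape using the mempool callback signatures visible in this patch; the pool size and names are illustrative:

        #include <linux/mempool.h>
        #include <linux/gfp.h>
        #include <linux/mm.h>
        #include <linux/errno.h>

        #define MY_POOL_SIZE 16         /* illustrative */

        static void *my_dma_page_alloc(gfp_t gfp_mask, void *data)
        {
                /* the DMA restriction lives in the callback, not in *data */
                return alloc_page(gfp_mask | GFP_DMA);
        }

        static void my_page_free(void *page, void *data)
        {
                __free_page(page);
        }

        static mempool_t *my_pool;

        static int my_pool_init(void)
        {
                my_pool = mempool_create(MY_POOL_SIZE, my_dma_page_alloc,
                                         my_page_free, NULL);
                return my_pool ? 0 : -ENOMEM;
        }
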
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459..61d38067803 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,6 +394,28 @@ out:
return ret;
}
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully. Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access)
+{
+ int ret = VM_FAULT_SIGBUS;
+ pte_t *pte;
+
+ spin_lock(&mm->page_table_lock);
+ pte = huge_pte_offset(mm, address);
+ if (pte && !pte_none(*pte))
+ ret = VM_FAULT_MINOR;
+ spin_unlock(&mm->page_table_lock);
+ return ret;
+}
+
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i)
diff --git a/mm/memory.c b/mm/memory.c
index 8c88b973abc..1db40e935e5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2045,18 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
inc_page_state(pgfault);
- if (unlikely(is_vm_hugetlb_page(vma))) {
- if (valid_hugetlb_file_off(vma, address))
- /* We get here only if there was a stale(zero) TLB entry
- * (because of HW prefetching).
- * Low-level arch code (if needed) should have already
- * purged the stale entry as part of this fault handling.
- * Here we just return.
- */
- return VM_FAULT_MINOR;
- else
- return VM_FAULT_SIGBUS; /* mapping truncation does this. */
- }
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, write_access);
/*
* We need the page table lock to synchronize with kswapd
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 37af443eb09..1d5c64df165 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -700,7 +700,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
case MPOL_BIND:
/* Lower zones don't get a policy applied */
/* Careful: current->mems_allowed might have moved */
- if ((gfp & GFP_ZONEMASK) >= policy_zone)
+ if (gfp_zone(gfp) >= policy_zone)
if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
return policy->v.zonelist;
/*FALL THROUGH*/
@@ -712,7 +712,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
nd = 0;
BUG();
}
- return NODE_DATA(nd)->node_zonelists + (gfp & GFP_ZONEMASK);
+ return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}
/* Do dynamic interleaving for a process */
@@ -757,7 +757,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned ni
struct page *page;
BUG_ON(!node_online(nid));
- zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
+ zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
page = __alloc_pages(gfp, order, zl);
if (page && page_zone(page) == zl->zones[0]) {
zone_pcp(zl->zones[0],get_cpu())->interleave_hit++;
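
The mempolicy.c hunks (and the page_alloc.c ones below) replace the open-coded gfp & GFP_ZONEMASK with gfp_zone(), so the mapping from a gfp mask to a zonelist index is computed in one place. A toy userspace model of that mapping; the bit values follow the 2.6-era layout but are reproduced here only for illustration:

        /* Toy gfp_zone(): mask out the zone-modifier bits and use them as a
         * zonelist index. */
        #include <stdio.h>

        #define __GFP_DMA       0x01u
        #define __GFP_HIGHMEM   0x02u
        #define GFP_ZONEMASK    0x03u

        static unsigned int gfp_zone(unsigned int gfp)
        {
                return gfp & GFP_ZONEMASK;
        }

        int main(void)
        {
                unsigned int base = 0x10u;      /* stand-in for the non-zone bits */

                printf("normal  -> zonelist %u\n", gfp_zone(base));
                printf("highmem -> zonelist %u\n", gfp_zone(base | __GFP_HIGHMEM));
                printf("dma     -> zonelist %u\n", gfp_zone(base | __GFP_DMA));
                return 0;
        }
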
diff --git a/mm/mempool.c b/mm/mempool.c
index 9e377ea700b..1a99b80480d 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -205,7 +205,7 @@ void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
void *element;
unsigned long flags;
wait_queue_t wait;
- unsigned int gfp_temp;
+ gfp_t gfp_temp;
might_sleep_if(gfp_mask & __GFP_WAIT);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cc1fe2672a3..94c864eac9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -734,7 +734,7 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
* of the allocation.
*/
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int can_try_harder, int gfp_high)
+ int classzone_idx, int can_try_harder, gfp_t gfp_high)
{
/* free_pages my go negative - that's OK */
long min = mark, free_pages = z->free_pages - (1 << order) + 1;
@@ -777,7 +777,7 @@ struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist)
{
- const int wait = gfp_mask & __GFP_WAIT;
+ const gfp_t wait = gfp_mask & __GFP_WAIT;
struct zone **zones, *z;
struct page *page;
struct reclaim_state reclaim_state;
@@ -996,7 +996,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
* get_zeroed_page() returns a 32-bit address, which cannot represent
* a highmem page
*/
- BUG_ON(gfp_mask & __GFP_HIGHMEM);
+ BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
if (page)
@@ -1089,7 +1089,7 @@ static unsigned int nr_free_zone_pages(int offset)
*/
unsigned int nr_free_buffer_pages(void)
{
- return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
+ return nr_free_zone_pages(gfp_zone(GFP_USER));
}
/*
@@ -1097,7 +1097,7 @@ unsigned int nr_free_buffer_pages(void)
*/
unsigned int nr_free_pagecache_pages(void)
{
- return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
+ return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
}
#ifdef CONFIG_HIGHMEM
@@ -1428,6 +1428,16 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
return j;
}
+static inline int highest_zone(int zone_bits)
+{
+ int res = ZONE_NORMAL;
+ if (zone_bits & (__force int)__GFP_HIGHMEM)
+ res = ZONE_HIGHMEM;
+ if (zone_bits & (__force int)__GFP_DMA)
+ res = ZONE_DMA;
+ return res;
+}
+
#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
static int __initdata node_load[MAX_NUMNODES];
@@ -1524,11 +1534,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
zonelist = pgdat->node_zonelists + i;
for (j = 0; zonelist->zones[j] != NULL; j++);
- k = ZONE_NORMAL;
- if (i & __GFP_HIGHMEM)
- k = ZONE_HIGHMEM;
- if (i & __GFP_DMA)
- k = ZONE_DMA;
+ k = highest_zone(i);
j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
zonelist->zones[j] = NULL;
@@ -1549,12 +1555,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
zonelist = pgdat->node_zonelists + i;
j = 0;
- k = ZONE_NORMAL;
- if (i & __GFP_HIGHMEM)
- k = ZONE_HIGHMEM;
- if (i & __GFP_DMA)
- k = ZONE_DMA;
-
+ k = highest_zone(i);
j = build_zonelists_node(pgdat, zonelist, j, k);
/*
* Now we build the zonelist so that it contains the zones
@@ -1750,6 +1751,8 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
+ memset(p, 0, sizeof(*p));
+
pcp = &p->pcp[0]; /* hot */
pcp->count = 0;
pcp->low = 2 * batch;
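
The new highest_zone() helper above replaces two identical if-ladders in build_zonelists(), mapping a zonelist index's zone-modifier bits to the highest zone that index may use. A small userspace harness exercising the same logic; the macros and the enum are local stand-ins for the kernel definitions:

        #include <assert.h>
        #include <stdio.h>

        #define __GFP_DMA       0x01
        #define __GFP_HIGHMEM   0x02

        enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM };

        static int highest_zone(int zone_bits)
        {
                int res = ZONE_NORMAL;

                if (zone_bits & __GFP_HIGHMEM)
                        res = ZONE_HIGHMEM;
                if (zone_bits & __GFP_DMA)
                        res = ZONE_DMA;
                return res;
        }

        int main(void)
        {
                assert(highest_zone(0) == ZONE_NORMAL);
                assert(highest_zone(__GFP_HIGHMEM) == ZONE_HIGHMEM);
                /* __GFP_DMA wins when both bits are set, as in the original */
                assert(highest_zone(__GFP_DMA | __GFP_HIGHMEM) == ZONE_DMA);
                printf("ok\n");
                return 0;
        }
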
diff --git a/mm/shmem.c b/mm/shmem.c
index ea064d89cda..55e04a0734c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ enum sgp_type {
static int shmem_getpage(struct inode *inode, unsigned long idx,
struct page **pagep, enum sgp_type sgp, int *type);
-static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
+static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
/*
* The above definition of ENTRIES_PER_PAGE, and the use of
@@ -898,7 +898,7 @@ struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
}
static struct page *
-shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
+shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
unsigned long idx)
{
struct vm_area_struct pvma;
diff --git a/mm/slab.c b/mm/slab.c
index d05c678bceb..d30423f167a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,7 +386,7 @@ struct kmem_cache_s {
unsigned int gfporder;
/* force GFP flags, e.g. GFP_DMA */
- unsigned int gfpflags;
+ gfp_t gfpflags;
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
@@ -2117,7 +2117,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
slabp->free = 0;
}
-static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA))
@@ -2152,7 +2152,7 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
struct slab *slabp;
void *objp;
size_t offset;
- unsigned int local_flags;
+ gfp_t local_flags;
unsigned long ctor_flags;
struct kmem_list3 *l3;
@@ -2546,7 +2546,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
/*
* A interface to enable slab creation on nodeid
*/
-static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{
struct list_head *entry;
struct slab *slabp;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64f9570cff5..843c87d1e61 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,7 +70,7 @@ struct scan_control {
unsigned int priority;
/* This context's GFP mask */
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
int may_writepage;
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
*
* Returns the number of slab objects which we shrunk.
*/
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages)
{
struct shrinker *shrinker;
@@ -926,7 +926,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
* holds filesystem locks which prevent writeout this might not work, and the
* allocation attempt will fail.
*/
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
int priority;
int ret = 0;
@@ -1338,7 +1338,7 @@ module_init(kswapd_init)
/*
* Try to free up some pages from this zone through reclaim.
*/
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
struct scan_control sc;
int nr_pages = 1 << order;
diff --git a/net/802/tr.c b/net/802/tr.c
index 1eaa3d19d8b..afd8385c0c9 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -340,9 +340,10 @@ static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
unsigned int hash, rii_p = 0;
unsigned long flags;
struct rif_cache *entry;
-
+ unsigned char saddr0;
spin_lock_irqsave(&rif_lock, flags);
+ saddr0 = trh->saddr[0];
/*
* Firstly see if the entry exists
@@ -395,7 +396,6 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
entry->local_ring = 0;
- trh->saddr[0]|=TR_RII; /* put the routing indicator back for tcpdump */
}
else
{
@@ -422,6 +422,7 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
}
entry->last_used=jiffies;
}
+ trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
spin_unlock_irqrestore(&rif_lock, flags);
}
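
The tr.c fix records trh->saddr[0] on entry and writes it back once at the end of tr_add_rif_info(), so the routing-indicator bit that the lookup path apparently strips is restored on every exit path, not only when a new entry is created. A sketch of that save/restore idiom; the flag value and the stripping step are stand-ins for what the rest of the function does:

        #include <assert.h>

        struct hdr { unsigned char saddr0; };

        static void process(struct hdr *h)
        {
                unsigned char saved = h->saddr0;   /* save on entry */

                h->saddr0 &= 0x7f;                 /* strip a flag bit for lookups */
                /* ... lookup/update work with several possible paths ... */

                h->saddr0 = saved;                 /* restore on every exit path */
        }

        int main(void)
        {
                struct hdr h = { 0x80 };

                process(&h);
                assert(h.saddr0 == 0x80);
                return 0;
        }
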
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 4128fc76ac3..e68700f950a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -175,39 +175,10 @@ static void pneigh_queue_purge(struct sk_buff_head *list)
}
}
-void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
+static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
int i;
- write_lock_bh(&tbl->lock);
-
- for (i=0; i <= tbl->hash_mask; i++) {
- struct neighbour *n, **np;
-
- np = &tbl->hash_buckets[i];
- while ((n = *np) != NULL) {
- if (dev && n->dev != dev) {
- np = &n->next;
- continue;
- }
- *np = n->next;
- write_lock_bh(&n->lock);
- n->dead = 1;
- neigh_del_timer(n);
- write_unlock_bh(&n->lock);
- neigh_release(n);
- }
- }
-
- write_unlock_bh(&tbl->lock);
-}
-
-int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
-{
- int i;
-
- write_lock_bh(&tbl->lock);
-
for (i = 0; i <= tbl->hash_mask; i++) {
struct neighbour *n, **np = &tbl->hash_buckets[i];
@@ -243,7 +214,19 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
neigh_release(n);
}
}
+}
+
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
+{
+ write_lock_bh(&tbl->lock);
+ neigh_flush_dev(tbl, dev);
+ write_unlock_bh(&tbl->lock);
+}
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+{
+ write_lock_bh(&tbl->lock);
+ neigh_flush_dev(tbl, dev);
pneigh_ifdown(tbl, dev);
write_unlock_bh(&tbl->lock);
@@ -732,6 +715,7 @@ static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
if (unlikely(mod_timer(&n->timer, when))) {
printk("NEIGH: BUG, double timer add, state is %x\n",
n->nud_state);
+ dump_stack();
}
}
@@ -815,10 +799,10 @@ static void neigh_timer_handler(unsigned long arg)
}
if (neigh->nud_state & NUD_IN_TIMER) {
- neigh_hold(neigh);
if (time_before(next, jiffies + HZ/2))
next = jiffies + HZ/2;
- neigh_add_timer(neigh, next);
+ if (!mod_timer(&neigh->timer, next))
+ neigh_hold(neigh);
}
if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
struct sk_buff *skb = skb_peek(&neigh->arp_queue);
@@ -1641,12 +1625,9 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
memset(&ndst, 0, sizeof(ndst));
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ for_each_cpu(cpu) {
struct neigh_statistics *st;
- if (!cpu_possible(cpu))
- continue;
-
st = per_cpu_ptr(tbl->stats, cpu);
ndst.ndts_allocs += st->allocs;
ndst.ndts_destroys += st->destroys;
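
neigh_changeaddr() and neigh_ifdown() used to duplicate the hash-bucket walk; the patch factors the shared body into neigh_flush_dev(), which expects tbl->lock to be held, and leaves only the locking (plus pneigh_ifdown() in the ifdown case) in the two entry points. A userspace sketch of that "callers lock, static helper assumes the lock" split; the names and the toy table are invented (link with -lpthread):

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
        static int table[4];

        /* Shared body: caller must hold tbl_lock. */
        static void flush_dev(int dev)
        {
                int i;

                for (i = 0; i < 4; i++)
                        if (table[i] == dev)
                                table[i] = 0;
        }

        void changeaddr(int dev)
        {
                pthread_mutex_lock(&tbl_lock);
                flush_dev(dev);
                pthread_mutex_unlock(&tbl_lock);
        }

        void ifdown(int dev)
        {
                pthread_mutex_lock(&tbl_lock);
                flush_dev(dev);
                /* extra teardown that only ifdown needs would go here */
                pthread_mutex_unlock(&tbl_lock);
        }

        int main(void)
        {
                table[1] = 7;
                ifdown(7);
                printf("slot 1 = %d\n", table[1]);
                return 0;
        }
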
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5f043d34669..7fc3e9e28c3 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -75,7 +75,7 @@
* By design there should only be *one* "controlling" process. In practice
* multiple write accesses gives unpredictable result. Understood by "write"
* to /proc gives result code thats should be read be the "writer".
- * For pratical use this should be no problem.
+ * For practical use this should be no problem.
*
* Note when adding devices to a specific CPU there good idea to also assign
* /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU.
@@ -96,7 +96,7 @@
* New xmit() return, do_div and misc clean up by Stephen Hemminger
* <shemminger@osdl.org> 040923
*
- * Rany Dunlap fixed u64 printk compiler waring
+ * Randy Dunlap fixed u64 printk compiler waring
*
* Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org>
* New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
@@ -137,6 +137,7 @@
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/wait.h>
#include <net/checksum.h>
#include <net/ipv6.h>
@@ -151,7 +152,7 @@
#include <asm/timex.h>
-#define VERSION "pktgen v2.62: Packet Generator for packet performance testing.\n"
+#define VERSION "pktgen v2.63: Packet Generator for packet performance testing.\n"
/* #define PG_DEBUG(a) a */
#define PG_DEBUG(a)
@@ -177,8 +178,8 @@
#define T_REMDEV (1<<3) /* Remove all devs */
/* Locks */
-#define thread_lock() spin_lock(&_thread_lock)
-#define thread_unlock() spin_unlock(&_thread_lock)
+#define thread_lock() down(&pktgen_sem)
+#define thread_unlock() up(&pktgen_sem)
/* If lock -- can be removed after some work */
#define if_lock(t) spin_lock(&(t->if_lock));
@@ -186,7 +187,9 @@
/* Used to help with determining the pkts on receive */
#define PKTGEN_MAGIC 0xbe9be955
-#define PG_PROC_DIR "net/pktgen"
+#define PG_PROC_DIR "pktgen"
+#define PGCTRL "pgctrl"
+static struct proc_dir_entry *pg_proc_dir = NULL;
#define MAX_CFLOWS 65536
@@ -202,11 +205,8 @@ struct pktgen_dev {
* Try to keep frequent/infrequent used vars. separated.
*/
- char ifname[32];
- struct proc_dir_entry *proc_ent;
+ char ifname[IFNAMSIZ];
char result[512];
- /* proc file names */
- char fname[80];
struct pktgen_thread* pg_thread; /* the owner */
struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */
@@ -244,7 +244,7 @@ struct pktgen_dev {
__u32 seq_num;
int clone_skb; /* Use multiple SKBs during packet gen. If this number
- * is greater than 1, then that many coppies of the same
+ * is greater than 1, then that many copies of the same
* packet will be sent before a new packet is allocated.
* For instance, if you want to send 1024 identical packets
* before creating a new packet, set clone_skb to 1024.
@@ -330,8 +330,6 @@ struct pktgen_thread {
struct pktgen_dev *if_list; /* All device here */
struct pktgen_thread* next;
char name[32];
- char fname[128]; /* name of proc file */
- struct proc_dir_entry *proc_ent;
char result[512];
u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */
@@ -396,7 +394,7 @@ static inline s64 divremdi3(s64 x, s64 y, int type)
/* End of hacks to deal with 64-bit math on x86 */
-/** Convert to miliseconds */
+/** Convert to milliseconds */
static inline __u64 tv_to_ms(const struct timeval* tv)
{
__u64 ms = tv->tv_usec / 1000;
@@ -425,7 +423,7 @@ static inline __u64 pg_div64(__u64 n, __u64 base)
{
__u64 tmp = n;
/*
- * How do we know if the architectrure we are running on
+ * How do we know if the architecture we are running on
* supports division with 64 bit base?
*
*/
@@ -473,16 +471,6 @@ static inline __u64 tv_diff(const struct timeval* a, const struct timeval* b)
static char version[] __initdata = VERSION;
-static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, size_t count, loff_t *ppos);
-static ssize_t proc_pgctrl_write(struct file* file, const char __user * buf, size_t count, loff_t *ppos);
-static int proc_if_read(char *buf , char **start, off_t offset, int len, int *eof, void *data);
-
-static int proc_thread_read(char *buf , char **start, off_t offset, int len, int *eof, void *data);
-static int proc_if_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data);
-static int proc_thread_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data);
-static int create_proc_dir(void);
-static int remove_proc_dir(void);
-
static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread* t, const char* ifname);
static struct pktgen_thread* pktgen_find_thread(const char* name);
@@ -503,83 +491,41 @@ static int pg_delay_d = 0;
static int pg_clone_skb_d = 0;
static int debug = 0;
-static DEFINE_SPINLOCK(_thread_lock);
+static DECLARE_MUTEX(pktgen_sem);
static struct pktgen_thread *pktgen_threads = NULL;
-static char module_fname[128];
-static struct proc_dir_entry *module_proc_ent = NULL;
-
static struct notifier_block pktgen_notifier_block = {
.notifier_call = pktgen_device_event,
};
-static struct file_operations pktgen_fops = {
- .read = proc_pgctrl_read,
- .write = proc_pgctrl_write,
- /* .ioctl = pktgen_ioctl, later maybe */
-};
-
/*
* /proc handling functions
*
*/
-static struct proc_dir_entry *pg_proc_dir = NULL;
-static int proc_pgctrl_read_eof=0;
-
-static ssize_t proc_pgctrl_read(struct file* file, char __user * buf,
- size_t count, loff_t *ppos)
+static int pgctrl_show(struct seq_file *seq, void *v)
{
- char data[200];
- int len = 0;
-
- if(proc_pgctrl_read_eof) {
- proc_pgctrl_read_eof=0;
- len = 0;
- goto out;
- }
-
- sprintf(data, "%s", VERSION);
-
- len = strlen(data);
-
- if(len > count) {
- len =-EFAULT;
- goto out;
- }
-
- if (copy_to_user(buf, data, len)) {
- len =-EFAULT;
- goto out;
- }
-
- *ppos += len;
- proc_pgctrl_read_eof=1; /* EOF next call */
-
- out:
- return len;
+ seq_puts(seq, VERSION);
+ return 0;
}
-static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf,
- size_t count, loff_t *ppos)
+static ssize_t pgctrl_write(struct file* file,const char __user * buf,
+ size_t count, loff_t *ppos)
{
- char *data = NULL;
int err = 0;
+ char data[128];
if (!capable(CAP_NET_ADMIN)){
err = -EPERM;
goto out;
}
- data = (void*)vmalloc ((unsigned int)count);
+ if (count > sizeof(data))
+ count = sizeof(data);
- if(!data) {
- err = -ENOMEM;
- goto out;
- }
if (copy_from_user(data, buf, count)) {
- err =-EFAULT;
- goto out_free;
+ err = -EFAULT;
+ goto out;
}
data[count-1] = 0; /* Make string */
@@ -594,31 +540,40 @@ static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf,
err = count;
- out_free:
- vfree (data);
out:
return err;
}
-static int proc_if_read(char *buf , char **start, off_t offset,
- int len, int *eof, void *data)
+static int pgctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pgctrl_show, PDE(inode)->data);
+}
+
+static struct file_operations pktgen_fops = {
+ .owner = THIS_MODULE,
+ .open = pgctrl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = pgctrl_write,
+ .release = single_release,
+};
+
+static int pktgen_if_show(struct seq_file *seq, void *v)
{
- char *p;
int i;
- struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data);
+ struct pktgen_dev *pkt_dev = seq->private;
__u64 sa;
__u64 stopped;
__u64 now = getCurUs();
- p = buf;
- p += sprintf(p, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n",
- (unsigned long long) pkt_dev->count,
- pkt_dev->min_pkt_size, pkt_dev->max_pkt_size);
+ seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n",
+ (unsigned long long) pkt_dev->count,
+ pkt_dev->min_pkt_size, pkt_dev->max_pkt_size);
- p += sprintf(p, " frags: %d delay: %u clone_skb: %d ifname: %s\n",
- pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname);
+ seq_printf(seq, " frags: %d delay: %u clone_skb: %d ifname: %s\n",
+ pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname);
- p += sprintf(p, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow);
+ seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow);
if(pkt_dev->flags & F_IPV6) {
@@ -626,19 +581,19 @@ static int proc_if_read(char *buf , char **start, off_t offset,
fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr);
fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr);
- p += sprintf(p, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3);
+ seq_printf(seq, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3);
fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr);
fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr);
fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr);
- p += sprintf(p, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3);
+ seq_printf(seq, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3);
}
else
- p += sprintf(p, " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n",
- pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max);
+ seq_printf(seq," dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n",
+ pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max);
- p += sprintf(p, " src_mac: ");
+ seq_puts(seq, " src_mac: ");
if ((pkt_dev->src_mac[0] == 0) &&
(pkt_dev->src_mac[1] == 0) &&
@@ -648,89 +603,89 @@ static int proc_if_read(char *buf , char **start, off_t offset,
(pkt_dev->src_mac[5] == 0))
for (i = 0; i < 6; i++)
- p += sprintf(p, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":");
+ seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":");
else
for (i = 0; i < 6; i++)
- p += sprintf(p, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":");
+ seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":");
- p += sprintf(p, "dst_mac: ");
+ seq_printf(seq, "dst_mac: ");
for (i = 0; i < 6; i++)
- p += sprintf(p, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":");
+ seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":");
- p += sprintf(p, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n",
- pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min,
- pkt_dev->udp_dst_max);
+ seq_printf(seq, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n",
+ pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min,
+ pkt_dev->udp_dst_max);
- p += sprintf(p, " src_mac_count: %d dst_mac_count: %d \n Flags: ",
- pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
+ seq_printf(seq, " src_mac_count: %d dst_mac_count: %d \n Flags: ",
+ pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
if (pkt_dev->flags & F_IPV6)
- p += sprintf(p, "IPV6 ");
+ seq_printf(seq, "IPV6 ");
if (pkt_dev->flags & F_IPSRC_RND)
- p += sprintf(p, "IPSRC_RND ");
+ seq_printf(seq, "IPSRC_RND ");
if (pkt_dev->flags & F_IPDST_RND)
- p += sprintf(p, "IPDST_RND ");
+ seq_printf(seq, "IPDST_RND ");
if (pkt_dev->flags & F_TXSIZE_RND)
- p += sprintf(p, "TXSIZE_RND ");
+ seq_printf(seq, "TXSIZE_RND ");
if (pkt_dev->flags & F_UDPSRC_RND)
- p += sprintf(p, "UDPSRC_RND ");
+ seq_printf(seq, "UDPSRC_RND ");
if (pkt_dev->flags & F_UDPDST_RND)
- p += sprintf(p, "UDPDST_RND ");
+ seq_printf(seq, "UDPDST_RND ");
if (pkt_dev->flags & F_MACSRC_RND)
- p += sprintf(p, "MACSRC_RND ");
+ seq_printf(seq, "MACSRC_RND ");
if (pkt_dev->flags & F_MACDST_RND)
- p += sprintf(p, "MACDST_RND ");
+ seq_printf(seq, "MACDST_RND ");
- p += sprintf(p, "\n");
+ seq_puts(seq, "\n");
sa = pkt_dev->started_at;
stopped = pkt_dev->stopped_at;
if (pkt_dev->running)
stopped = now; /* not really stopped, more like last-running-at */
- p += sprintf(p, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n",
- (unsigned long long) pkt_dev->sofar,
- (unsigned long long) pkt_dev->errors,
- (unsigned long long) sa,
- (unsigned long long) stopped,
- (unsigned long long) pkt_dev->idle_acc);
+ seq_printf(seq, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n",
+ (unsigned long long) pkt_dev->sofar,
+ (unsigned long long) pkt_dev->errors,
+ (unsigned long long) sa,
+ (unsigned long long) stopped,
+ (unsigned long long) pkt_dev->idle_acc);
- p += sprintf(p, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n",
- pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, pkt_dev->cur_src_mac_offset);
+ seq_printf(seq, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n",
+ pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
+ pkt_dev->cur_src_mac_offset);
if(pkt_dev->flags & F_IPV6) {
char b1[128], b2[128];
fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr);
fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr);
- p += sprintf(p, " cur_saddr: %s cur_daddr: %s\n", b2, b1);
+ seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1);
}
else
- p += sprintf(p, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
- pkt_dev->cur_saddr, pkt_dev->cur_daddr);
+ seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
+ pkt_dev->cur_saddr, pkt_dev->cur_daddr);
- p += sprintf(p, " cur_udp_dst: %d cur_udp_src: %d\n",
- pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
+ seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n",
+ pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
- p += sprintf(p, " flows: %u\n", pkt_dev->nflows);
+ seq_printf(seq, " flows: %u\n", pkt_dev->nflows);
if (pkt_dev->result[0])
- p += sprintf(p, "Result: %s\n", pkt_dev->result);
+ seq_printf(seq, "Result: %s\n", pkt_dev->result);
else
- p += sprintf(p, "Result: Idle\n");
- *eof = 1;
+ seq_printf(seq, "Result: Idle\n");
- return p - buf;
+ return 0;
}
@@ -802,13 +757,14 @@ done_str:
return i;
}
-static int proc_if_write(struct file *file, const char __user *user_buffer,
- unsigned long count, void *data)
+static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer,
+ size_t count, loff_t *offset)
{
+ struct seq_file *seq = (struct seq_file *) file->private_data;
+ struct pktgen_dev *pkt_dev = seq->private;
int i = 0, max, len;
char name[16], valstr[32];
unsigned long value = 0;
- struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data);
char* pg_result = NULL;
int tmp = 0;
char buf[128];
@@ -849,7 +805,8 @@ static int proc_if_write(struct file *file, const char __user *user_buffer,
if (copy_from_user(tb, user_buffer, count))
return -EFAULT;
tb[count] = 0;
- printk("pktgen: %s,%lu buffer -:%s:-\n", name, count, tb);
+ printk("pktgen: %s,%lu buffer -:%s:-\n", name,
+ (unsigned long) count, tb);
}
if (!strcmp(name, "min_pkt_size")) {
@@ -1335,92 +1292,98 @@ static int proc_if_write(struct file *file, const char __user *user_buffer,
return -EINVAL;
}
-static int proc_thread_read(char *buf , char **start, off_t offset,
- int len, int *eof, void *data)
+static int pktgen_if_open(struct inode *inode, struct file *file)
{
- char *p;
- struct pktgen_thread *t = (struct pktgen_thread*)(data);
- struct pktgen_dev *pkt_dev = NULL;
+ return single_open(file, pktgen_if_show, PDE(inode)->data);
+}
+static struct file_operations pktgen_if_fops = {
+ .owner = THIS_MODULE,
+ .open = pktgen_if_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = pktgen_if_write,
+ .release = single_release,
+};
- if (!t) {
- printk("pktgen: ERROR: could not find thread in proc_thread_read\n");
- return -EINVAL;
- }
+static int pktgen_thread_show(struct seq_file *seq, void *v)
+{
+ struct pktgen_thread *t = seq->private;
+ struct pktgen_dev *pkt_dev = NULL;
+
+ BUG_ON(!t);
- p = buf;
- p += sprintf(p, "Name: %s max_before_softirq: %d\n",
+ seq_printf(seq, "Name: %s max_before_softirq: %d\n",
t->name, t->max_before_softirq);
- p += sprintf(p, "Running: ");
+ seq_printf(seq, "Running: ");
if_lock(t);
for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next)
if(pkt_dev->running)
- p += sprintf(p, "%s ", pkt_dev->ifname);
+ seq_printf(seq, "%s ", pkt_dev->ifname);
- p += sprintf(p, "\nStopped: ");
+ seq_printf(seq, "\nStopped: ");
for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next)
if(!pkt_dev->running)
- p += sprintf(p, "%s ", pkt_dev->ifname);
+ seq_printf(seq, "%s ", pkt_dev->ifname);
if (t->result[0])
- p += sprintf(p, "\nResult: %s\n", t->result);
+ seq_printf(seq, "\nResult: %s\n", t->result);
else
- p += sprintf(p, "\nResult: NA\n");
-
- *eof = 1;
+ seq_printf(seq, "\nResult: NA\n");
if_unlock(t);
- return p - buf;
+ return 0;
}
-static int proc_thread_write(struct file *file, const char __user *user_buffer,
- unsigned long count, void *data)
+static ssize_t pktgen_thread_write(struct file *file,
+ const char __user *user_buffer,
+ size_t count, loff_t *offset)
{
+ struct seq_file *seq = (struct seq_file *) file->private_data;
+ struct pktgen_thread *t = seq->private;
int i = 0, max, len, ret;
char name[40];
- struct pktgen_thread *t;
char *pg_result;
unsigned long value = 0;
-
+
if (count < 1) {
// sprintf(pg_result, "Wrong command format");
return -EINVAL;
}
-
+
max = count - i;
len = count_trail_chars(&user_buffer[i], max);
- if (len < 0)
- return len;
-
+ if (len < 0)
+ return len;
+
i += len;
-
+
/* Read variable name */
len = strn_len(&user_buffer[i], sizeof(name) - 1);
- if (len < 0)
- return len;
+ if (len < 0)
+ return len;
memset(name, 0, sizeof(name));
if (copy_from_user(name, &user_buffer[i], len))
return -EFAULT;
i += len;
-
+
max = count -i;
len = count_trail_chars(&user_buffer[i], max);
- if (len < 0)
- return len;
-
+ if (len < 0)
+ return len;
+
i += len;
- if (debug)
- printk("pktgen: t=%s, count=%lu\n", name, count);
-
+ if (debug)
+ printk("pktgen: t=%s, count=%lu\n", name,
+ (unsigned long) count);
- t = (struct pktgen_thread*)(data);
if(!t) {
printk("pktgen: ERROR: No thread\n");
ret = -EINVAL;
@@ -1474,21 +1437,19 @@ static int proc_thread_write(struct file *file, const char __user *user_buffer,
return ret;
}
-static int create_proc_dir(void)
+static int pktgen_thread_open(struct inode *inode, struct file *file)
{
- pg_proc_dir = proc_mkdir(PG_PROC_DIR, NULL);
-
- if (!pg_proc_dir)
- return -ENODEV;
-
- return 0;
+ return single_open(file, pktgen_thread_show, PDE(inode)->data);
}
-static int remove_proc_dir(void)
-{
- remove_proc_entry(PG_PROC_DIR, NULL);
- return 0;
-}
+static struct file_operations pktgen_thread_fops = {
+ .owner = THIS_MODULE,
+ .open = pktgen_thread_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = pktgen_thread_write,
+ .release = single_release,
+};
/* Think find or remove for NN */
static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove)
@@ -1702,7 +1663,7 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
start = now = getCurUs();
printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now));
while (now < spin_until_us) {
- /* TODO: optimise sleeping behavior */
+ /* TODO: optimize sleeping behavior */
if (spin_until_us - now > jiffies_to_usecs(1)+1)
schedule_timeout_interruptible(1);
else if (spin_until_us - now > 100) {
@@ -2361,7 +2322,7 @@ static void pktgen_stop_all_threads_ifs(void)
pktgen_stop(t);
t = t->next;
}
- thread_unlock();
+ thread_unlock();
}
static int thread_is_running(struct pktgen_thread *t )
@@ -2552,10 +2513,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
struct pktgen_thread *tmp = pktgen_threads;
- if (strlen(t->fname))
- remove_proc_entry(t->fname, NULL);
+ remove_proc_entry(t->name, pg_proc_dir);
- thread_lock();
+ thread_lock();
if (tmp == t)
pktgen_threads = tmp->next;
@@ -2825,7 +2785,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, const char* i
if_lock(t);
for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) {
- if (strcmp(pkt_dev->ifname, ifname) == 0) {
+ if (strncmp(pkt_dev->ifname, ifname, IFNAMSIZ) == 0) {
break;
}
}
@@ -2864,74 +2824,70 @@ static int add_dev_to_thread(struct pktgen_thread *t, struct pktgen_dev *pkt_dev
static int pktgen_add_device(struct pktgen_thread *t, const char* ifname)
{
struct pktgen_dev *pkt_dev;
+ struct proc_dir_entry *pe;
/* We don't allow a device to be on several threads */
- if( (pkt_dev = __pktgen_NN_threads(ifname, FIND)) == NULL) {
-
- pkt_dev = kmalloc(sizeof(struct pktgen_dev), GFP_KERNEL);
- if (!pkt_dev)
- return -ENOMEM;
+ pkt_dev = __pktgen_NN_threads(ifname, FIND);
+ if (pkt_dev) {
+ printk("pktgen: ERROR: interface already used.\n");
+ return -EBUSY;
+ }
- memset(pkt_dev, 0, sizeof(struct pktgen_dev));
+ pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL);
+ if (!pkt_dev)
+ return -ENOMEM;
- pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state));
- if (pkt_dev->flows == NULL) {
- kfree(pkt_dev);
- return -ENOMEM;
- }
- memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state));
-
- pkt_dev->min_pkt_size = ETH_ZLEN;
- pkt_dev->max_pkt_size = ETH_ZLEN;
- pkt_dev->nfrags = 0;
- pkt_dev->clone_skb = pg_clone_skb_d;
- pkt_dev->delay_us = pg_delay_d / 1000;
- pkt_dev->delay_ns = pg_delay_d % 1000;
- pkt_dev->count = pg_count_d;
- pkt_dev->sofar = 0;
- pkt_dev->udp_src_min = 9; /* sink port */
- pkt_dev->udp_src_max = 9;
- pkt_dev->udp_dst_min = 9;
- pkt_dev->udp_dst_max = 9;
-
- strncpy(pkt_dev->ifname, ifname, 31);
- sprintf(pkt_dev->fname, "%s/%s", PG_PROC_DIR, ifname);
-
- if (! pktgen_setup_dev(pkt_dev)) {
- printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
- if (pkt_dev->flows)
- vfree(pkt_dev->flows);
- kfree(pkt_dev);
- return -ENODEV;
- }
+ pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state));
+ if (pkt_dev->flows == NULL) {
+ kfree(pkt_dev);
+ return -ENOMEM;
+ }
+ memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state));
- pkt_dev->proc_ent = create_proc_entry(pkt_dev->fname, 0600, NULL);
- if (!pkt_dev->proc_ent) {
- printk("pktgen: cannot create %s procfs entry.\n", pkt_dev->fname);
- if (pkt_dev->flows)
- vfree(pkt_dev->flows);
- kfree(pkt_dev);
- return -EINVAL;
- }
- pkt_dev->proc_ent->read_proc = proc_if_read;
- pkt_dev->proc_ent->write_proc = proc_if_write;
- pkt_dev->proc_ent->data = (void*)(pkt_dev);
- pkt_dev->proc_ent->owner = THIS_MODULE;
+ pkt_dev->min_pkt_size = ETH_ZLEN;
+ pkt_dev->max_pkt_size = ETH_ZLEN;
+ pkt_dev->nfrags = 0;
+ pkt_dev->clone_skb = pg_clone_skb_d;
+ pkt_dev->delay_us = pg_delay_d / 1000;
+ pkt_dev->delay_ns = pg_delay_d % 1000;
+ pkt_dev->count = pg_count_d;
+ pkt_dev->sofar = 0;
+ pkt_dev->udp_src_min = 9; /* sink port */
+ pkt_dev->udp_src_max = 9;
+ pkt_dev->udp_dst_min = 9;
+ pkt_dev->udp_dst_max = 9;
+
+ strncpy(pkt_dev->ifname, ifname, IFNAMSIZ);
+
+ if (! pktgen_setup_dev(pkt_dev)) {
+ printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
+ if (pkt_dev->flows)
+ vfree(pkt_dev->flows);
+ kfree(pkt_dev);
+ return -ENODEV;
+ }
+
+ pe = create_proc_entry(ifname, 0600, pg_proc_dir);
+ if (!pe) {
+ printk("pktgen: cannot create %s/%s procfs entry.\n",
+ PG_PROC_DIR, ifname);
+ if (pkt_dev->flows)
+ vfree(pkt_dev->flows);
+ kfree(pkt_dev);
+ return -EINVAL;
+ }
+ pe->proc_fops = &pktgen_if_fops;
+ pe->data = pkt_dev;
- return add_dev_to_thread(t, pkt_dev);
- }
- else {
- printk("pktgen: ERROR: interface already used.\n");
- return -EBUSY;
- }
+ return add_dev_to_thread(t, pkt_dev);
}
static struct pktgen_thread *pktgen_find_thread(const char* name)
{
struct pktgen_thread *t = NULL;
- thread_lock();
+ thread_lock();
t = pktgen_threads;
while (t) {
@@ -2947,6 +2903,7 @@ static struct pktgen_thread *pktgen_find_thread(const char* name)
static int pktgen_create_thread(const char* name, int cpu)
{
struct pktgen_thread *t = NULL;
+ struct proc_dir_entry *pe;
if (strlen(name) > 31) {
printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n");
@@ -2958,28 +2915,26 @@ static int pktgen_create_thread(const char* name, int cpu)
return -EINVAL;
}
- t = (struct pktgen_thread*)(kmalloc(sizeof(struct pktgen_thread), GFP_KERNEL));
+ t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL);
if (!t) {
printk("pktgen: ERROR: out of memory, can't create new thread.\n");
return -ENOMEM;
}
- memset(t, 0, sizeof(struct pktgen_thread));
strcpy(t->name, name);
spin_lock_init(&t->if_lock);
t->cpu = cpu;
- sprintf(t->fname, "%s/%s", PG_PROC_DIR, t->name);
- t->proc_ent = create_proc_entry(t->fname, 0600, NULL);
- if (!t->proc_ent) {
- printk("pktgen: cannot create %s procfs entry.\n", t->fname);
+ pe = create_proc_entry(t->name, 0600, pg_proc_dir);
+ if (!pe) {
+ printk("pktgen: cannot create %s/%s procfs entry.\n",
+ PG_PROC_DIR, t->name);
kfree(t);
return -EINVAL;
}
- t->proc_ent->read_proc = proc_thread_read;
- t->proc_ent->write_proc = proc_thread_write;
- t->proc_ent->data = (void*)(t);
- t->proc_ent->owner = THIS_MODULE;
+
+ pe->proc_fops = &pktgen_thread_fops;
+ pe->data = t;
t->next = pktgen_threads;
pktgen_threads = t;
@@ -3034,8 +2989,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_
/* Clean up proc file system */
- if (strlen(pkt_dev->fname))
- remove_proc_entry(pkt_dev->fname, NULL);
+ remove_proc_entry(pkt_dev->ifname, pg_proc_dir);
if (pkt_dev->flows)
vfree(pkt_dev->flows);
@@ -3046,31 +3000,31 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_
static int __init pg_init(void)
{
int cpu;
- printk(version);
+ struct proc_dir_entry *pe;
- module_fname[0] = 0;
+ printk(version);
- create_proc_dir();
+ pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net);
+ if (!pg_proc_dir)
+ return -ENODEV;
+ pg_proc_dir->owner = THIS_MODULE;
- sprintf(module_fname, "%s/pgctrl", PG_PROC_DIR);
- module_proc_ent = create_proc_entry(module_fname, 0600, NULL);
- if (!module_proc_ent) {
- printk("pktgen: ERROR: cannot create %s procfs entry.\n", module_fname);
+ pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir);
+ if (pe == NULL) {
+ printk("pktgen: ERROR: cannot create %s procfs entry.\n", PGCTRL);
+ proc_net_remove(PG_PROC_DIR);
return -EINVAL;
}
- module_proc_ent->proc_fops = &pktgen_fops;
- module_proc_ent->data = NULL;
+ pe->proc_fops = &pktgen_fops;
+ pe->data = NULL;
/* Register us to receive netdevice events */
register_netdevice_notifier(&pktgen_notifier_block);
- for (cpu = 0; cpu < NR_CPUS ; cpu++) {
+ for_each_online_cpu(cpu) {
char buf[30];
- if (!cpu_online(cpu))
- continue;
-
sprintf(buf, "kpktgend_%i", cpu);
pktgen_create_thread(buf, cpu);
}
@@ -3095,10 +3049,8 @@ static void __exit pg_cleanup(void)
unregister_netdevice_notifier(&pktgen_notifier_block);
/* Clean up proc file system */
-
- remove_proc_entry(module_fname, NULL);
-
- remove_proc_dir();
+ remove_proc_entry(PGCTRL, pg_proc_dir);
+ proc_net_remove(PG_PROC_DIR);
}
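
The pktgen rewrite above is a fairly standard conversion from the old read_proc/write_proc handlers to seq_file: each proc entry gets a show() routine, single_open() ties it to PDE(inode)->data, and seq_read/seq_lseek/single_release replace the hand-rolled buffering. A condensed sketch of that pattern against the 2.6-era procfs interface; the entry name and payload struct are invented:

        #include <linux/module.h>
        #include <linux/proc_fs.h>
        #include <linux/seq_file.h>
        #include <linux/errno.h>

        struct my_state {
                int counter;            /* invented payload */
        };

        static struct my_state my_state;

        static int my_show(struct seq_file *seq, void *v)
        {
                struct my_state *st = seq->private;

                seq_printf(seq, "counter: %d\n", st->counter);
                return 0;
        }

        static int my_open(struct inode *inode, struct file *file)
        {
                return single_open(file, my_show, PDE(inode)->data);
        }

        static struct file_operations my_fops = {
                .owner   = THIS_MODULE,
                .open    = my_open,
                .read    = seq_read,
                .llseek  = seq_lseek,
                .release = single_release,
        };

        static int __init my_init(void)
        {
                struct proc_dir_entry *pe;

                pe = create_proc_entry("my_entry", 0444, NULL);
                if (!pe)
                        return -ENOMEM;
                pe->proc_fops = &my_fops;
                pe->data = &my_state;
                return 0;
        }

        static void __exit my_exit(void)
        {
                remove_proc_entry("my_entry", NULL);
        }

        module_init(my_init);
        module_exit(my_exit);
        MODULE_LICENSE("GPL");
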
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index af9b1516e21..ef9d46b91eb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -122,6 +122,8 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
* __alloc_skb - allocate a network buffer
* @size: size to allocate
* @gfp_mask: allocation mask
+ * @fclone: allocate from fclone cache instead of head cache
+ * and allocate a cloned (child) skb
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
* tail room of size bytes. The object has a reference count of one.
@@ -410,6 +412,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
C(nfct);
nf_conntrack_get(skb->nfct);
C(nfctinfo);
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+ C(ipvs_property);
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
C(nf_bridge);
nf_bridge_get(skb->nf_bridge);
@@ -467,6 +472,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->nfct = old->nfct;
nf_conntrack_get(old->nfct);
new->nfctinfo = old->nfctinfo;
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+ new->ipvs_property = old->ipvs_property;
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
new->nf_bridge = old->nf_bridge;
nf_bridge_get(old->nf_bridge);
diff --git a/net/core/sock.c b/net/core/sock.c
index 1c52fe809ed..9602ceb3bac 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -940,7 +940,7 @@ static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
int noblock, int *errcode)
{
struct sk_buff *skb;
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
long timeo;
int err;
diff --git a/net/core/wireless.c b/net/core/wireless.c
index d17f1583ea3..271ddb35b0b 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -455,10 +455,15 @@ static inline struct iw_statistics *get_wireless_stats(struct net_device *dev)
/* Old location, field to be removed in next WE */
if(dev->get_wireless_stats) {
- printk(KERN_DEBUG "%s (WE) : Driver using old /proc/net/wireless support, please fix driver !\n",
- dev->name);
+ static int printed_message;
+
+ if (!printed_message++)
+ printk(KERN_DEBUG "%s (WE) : Driver using old /proc/net/wireless support, please fix driver !\n",
+ dev->name);
+
return dev->get_wireless_stats(dev);
}
+
/* Not found */
return (struct iw_statistics *) NULL;
}
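
The wireless.c hunk turns an unconditional KERN_DEBUG nag into a print-once message by counting calls in a function-local static. The same idiom in plain, runnable C; the message text is illustrative:

        #include <stdio.h>

        static void warn_legacy_api(const char *name)
        {
                static int printed_message;

                if (!printed_message++)
                        fprintf(stderr, "%s: driver uses legacy stats hook\n", name);
        }

        int main(void)
        {
                warn_legacy_api("eth0");
                warn_legacy_api("eth0");        /* silent from here on */
                return 0;
        }
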
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ae088d1347a..6298cf58ff9 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -463,6 +463,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
if (skb != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
ireq->rmt_addr,
ireq->opt);
@@ -647,6 +648,7 @@ int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
if (skb != NULL) {
const struct inet_sock *inet = inet_sk(sk);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
err = ip_build_and_send_pkt(skb, sk,
inet->saddr, inet->daddr, NULL);
if (err == NET_XMIT_CN)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 4786bdcddcc..d59f86f7cea 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -62,10 +62,8 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
skb->h.raw = skb_push(skb, dccp_header_size);
dh = dccp_hdr(skb);
- /*
- * Data packets are not cloned as they are never retransmitted
- */
- if (skb_cloned(skb))
+
+ if (!skb->sk)
skb_set_owner_w(skb, sk);
/* Build DCCP header and checksum it. */
@@ -102,6 +100,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
err = ip_queue_xmit(skb, 0);
if (err <= 0)
return err;
@@ -243,7 +242,8 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
err = dccp_transmit_skb(sk, skb);
ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
- }
+ } else
+ kfree_skb(skb);
return err;
}
@@ -495,7 +495,7 @@ void dccp_send_close(struct sock *sk, const int active)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
- const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;
+ const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;
skb = alloc_skb(sk->sk_prot->max_header, prio);
if (skb == NULL)
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a1cfd0e9e3b..a021c3422f6 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -402,8 +402,6 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
* This bug was _quickly_ found & fixed by just looking at an OSTRA
* generated callgraph 8) -acme
*/
- if (rc != 0)
- goto out_discard;
out_release:
release_sock(sk);
return rc ? : len;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 1186dc44cdf..3f25cadccdd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -719,22 +719,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (saddr->sdn_flags & ~SDF_WILD)
return -EINVAL;
-#if 1
if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
(saddr->sdn_flags & SDF_WILD)))
return -EACCES;
-#else
- /*
- * Maybe put the default actions in the default security ops for
- * dn_prot_sock ? Would be nice if the capable call would go there
- * too.
- */
- if (security_dn_prot_sock(saddr) &&
- !capable(CAP_NET_BIND_SERVICE) ||
- saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD))
- return -EACCES;
-#endif
-
if (!(saddr->sdn_flags & SDF_WILD)) {
if (dn_ntohs(saddr->sdn_nodeaddrl)) {
diff --git a/net/ieee80211/Makefile b/net/ieee80211/Makefile
index a6ccac5baea..f988417121d 100644
--- a/net/ieee80211/Makefile
+++ b/net/ieee80211/Makefile
@@ -7,5 +7,6 @@ ieee80211-objs := \
ieee80211_module.o \
ieee80211_tx.o \
ieee80211_rx.o \
- ieee80211_wx.o
+ ieee80211_wx.o \
+ ieee80211_geo.o
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
index 61a9d92e455..f3b6aa3be63 100644
--- a/net/ieee80211/ieee80211_crypt.c
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -41,6 +41,12 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
{
struct list_head *ptr, *n;
struct ieee80211_crypt_data *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+
+ if (list_empty(&ieee->crypt_deinit_list))
+ goto unlock;
for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
@@ -57,6 +63,18 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
}
kfree(entry);
}
+ unlock:
+ spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+/* After this, crypt_deinit_list won't accept new members */
+void ieee80211_crypt_quiescing(struct ieee80211_device *ieee)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+ ieee->crypt_quiesced = 1;
+ spin_unlock_irqrestore(&ieee->lock, flags);
}
void ieee80211_crypt_deinit_handler(unsigned long data)
@@ -64,16 +82,16 @@ void ieee80211_crypt_deinit_handler(unsigned long data)
struct ieee80211_device *ieee = (struct ieee80211_device *)data;
unsigned long flags;
- spin_lock_irqsave(&ieee->lock, flags);
ieee80211_crypt_deinit_entries(ieee, 0);
- if (!list_empty(&ieee->crypt_deinit_list)) {
+
+ spin_lock_irqsave(&ieee->lock, flags);
+ if (!list_empty(&ieee->crypt_deinit_list) && !ieee->crypt_quiesced) {
printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
"deletion list\n", ieee->dev->name);
ieee->crypt_deinit_timer.expires = jiffies + HZ;
add_timer(&ieee->crypt_deinit_timer);
}
spin_unlock_irqrestore(&ieee->lock, flags);
-
}
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
@@ -93,10 +111,12 @@ void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
* locking. */
spin_lock_irqsave(&ieee->lock, flags);
- list_add(&tmp->list, &ieee->crypt_deinit_list);
- if (!timer_pending(&ieee->crypt_deinit_timer)) {
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
+ if (!ieee->crypt_quiesced) {
+ list_add(&tmp->list, &ieee->crypt_deinit_list);
+ if (!timer_pending(&ieee->crypt_deinit_timer)) {
+ ieee->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&ieee->crypt_deinit_timer);
+ }
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -191,18 +211,18 @@ static void ieee80211_crypt_null_deinit(void *priv)
}
static struct ieee80211_crypto_ops ieee80211_crypt_null = {
- .name = "NULL",
- .init = ieee80211_crypt_null_init,
- .deinit = ieee80211_crypt_null_deinit,
- .encrypt_mpdu = NULL,
- .decrypt_mpdu = NULL,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = NULL,
- .get_key = NULL,
- .extra_prefix_len = 0,
- .extra_postfix_len = 0,
- .owner = THIS_MODULE,
+ .name = "NULL",
+ .init = ieee80211_crypt_null_init,
+ .deinit = ieee80211_crypt_null_deinit,
+ .encrypt_mpdu = NULL,
+ .decrypt_mpdu = NULL,
+ .encrypt_msdu = NULL,
+ .decrypt_msdu = NULL,
+ .set_key = NULL,
+ .get_key = NULL,
+ .extra_mpdu_prefix_len = 0,
+ .extra_mpdu_postfix_len = 0,
+ .owner = THIS_MODULE,
};
static int __init ieee80211_crypto_init(void)
@@ -249,6 +269,7 @@ static void __exit ieee80211_crypto_deinit(void)
EXPORT_SYMBOL(ieee80211_crypt_deinit_entries);
EXPORT_SYMBOL(ieee80211_crypt_deinit_handler);
EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit);
+EXPORT_SYMBOL(ieee80211_crypt_quiescing);
EXPORT_SYMBOL(ieee80211_register_crypto_ops);
EXPORT_SYMBOL(ieee80211_unregister_crypto_ops);
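
The hunks above close a teardown race: once crypt_quiesced is set under ieee->lock, ieee80211_crypt_delayed_deinit() stops queueing new entries and the timer handler stops re-arming itself, so free_ieee80211() can del_timer_sync() and drain the list safely. A minimal user-space sketch of the same quiesce-flag idea, with a pthread mutex standing in for the spinlock and a counter standing in for crypt_deinit_list (names are illustrative, not from the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int quiesced;                    /* mirrors ieee->crypt_quiesced  */
static int pending;                     /* stands in for the deinit list */

static int queue_delayed_deinit(void)
{
        int queued = 0;

        pthread_mutex_lock(&lock);
        if (!quiesced) {                /* same check the patch adds */
                pending++;
                queued = 1;
        }
        pthread_mutex_unlock(&lock);
        return queued;
}

static void quiesce(void)
{
        pthread_mutex_lock(&lock);
        quiesced = 1;                   /* list no longer accepts members */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        printf("queued: %d\n", queue_delayed_deinit());  /* 1 */
        quiesce();
        printf("queued: %d\n", queue_delayed_deinit());  /* 0 */
        return 0;
}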
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 8fc13f45971..05a853c1301 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -119,7 +119,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len)
}
static void ccmp_init_blocks(struct crypto_tfm *tfm,
- struct ieee80211_hdr *hdr,
+ struct ieee80211_hdr_4addr *hdr,
u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0)
{
u8 *pos, qc = 0;
@@ -191,26 +191,18 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
}
-static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_ccmp_data *key = priv;
- int data_len, i, blocks, last, len;
- u8 *pos, *mic;
- struct ieee80211_hdr *hdr;
- u8 *b0 = key->tx_b0;
- u8 *b = key->tx_b;
- u8 *e = key->tx_e;
- u8 *s0 = key->tx_s0;
+ int i;
+ u8 *pos;
- if (skb_headroom(skb) < CCMP_HDR_LEN ||
- skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len)
+ if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len)
return -1;
- data_len = skb->len - hdr_len;
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
pos += hdr_len;
- mic = skb_put(skb, CCMP_MIC_LEN);
i = CCMP_PN_LEN - 1;
while (i >= 0) {
@@ -229,7 +221,31 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = key->tx_pn[1];
*pos++ = key->tx_pn[0];
- hdr = (struct ieee80211_hdr *)skb->data;
+ return CCMP_HDR_LEN;
+}
+
+static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct ieee80211_ccmp_data *key = priv;
+ int data_len, i, blocks, last, len;
+ u8 *pos, *mic;
+ struct ieee80211_hdr_4addr *hdr;
+ u8 *b0 = key->tx_b0;
+ u8 *b = key->tx_b;
+ u8 *e = key->tx_e;
+ u8 *s0 = key->tx_s0;
+
+ if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len)
+ return -1;
+
+ data_len = skb->len - hdr_len;
+ len = ieee80211_ccmp_hdr(skb, hdr_len, priv);
+ if (len < 0)
+ return -1;
+
+ pos = skb->data + hdr_len + CCMP_HDR_LEN;
+ mic = skb_put(skb, CCMP_MIC_LEN);
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
@@ -258,7 +274,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_ccmp_data *key = priv;
u8 keyidx, *pos;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u8 *b0 = key->rx_b0;
u8 *b = key->rx_b;
u8 *a = key->rx_a;
@@ -272,7 +288,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -1;
}
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
@@ -426,19 +442,20 @@ static char *ieee80211_ccmp_print_stats(char *p, void *priv)
}
static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
- .name = "CCMP",
- .init = ieee80211_ccmp_init,
- .deinit = ieee80211_ccmp_deinit,
- .encrypt_mpdu = ieee80211_ccmp_encrypt,
- .decrypt_mpdu = ieee80211_ccmp_decrypt,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = ieee80211_ccmp_set_key,
- .get_key = ieee80211_ccmp_get_key,
- .print_stats = ieee80211_ccmp_print_stats,
- .extra_prefix_len = CCMP_HDR_LEN,
- .extra_postfix_len = CCMP_MIC_LEN,
- .owner = THIS_MODULE,
+ .name = "CCMP",
+ .init = ieee80211_ccmp_init,
+ .deinit = ieee80211_ccmp_deinit,
+ .build_iv = ieee80211_ccmp_hdr,
+ .encrypt_mpdu = ieee80211_ccmp_encrypt,
+ .decrypt_mpdu = ieee80211_ccmp_decrypt,
+ .encrypt_msdu = NULL,
+ .decrypt_msdu = NULL,
+ .set_key = ieee80211_ccmp_set_key,
+ .get_key = ieee80211_ccmp_get_key,
+ .print_stats = ieee80211_ccmp_print_stats,
+ .extra_mpdu_prefix_len = CCMP_HDR_LEN,
+ .extra_mpdu_postfix_len = CCMP_MIC_LEN,
+ .owner = THIS_MODULE,
};
static int __init ieee80211_crypto_ccmp_init(void)
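
The CCMP change splits header construction out of the encrypt path: ieee80211_ccmp_hdr() pushes the CCMP header (PN + ExtIV) and is exported as .build_iv, while ieee80211_ccmp_encrypt() calls it and then appends the MIC. A standalone sketch of the resulting MPDU layout, assuming CCMP_HDR_LEN and CCMP_MIC_LEN are 8 bytes each (the values behind the extra_mpdu_*_len fields above):

#include <stdio.h>

#define CCMP_HDR_LEN 8                  /* assumed: PN + key id + ExtIV */
#define CCMP_MIC_LEN 8                  /* assumed: CCMP MIC length     */

int main(void)
{
        int hdr_len = 24;               /* 3-address data frame header   */
        int payload = 1500;             /* plaintext bytes after the hdr */

        int pn_off   = hdr_len;                 /* written by ..._ccmp_hdr()   */
        int data_off = hdr_len + CCMP_HDR_LEN;  /* "pos" used by the cipher    */
        int mic_off  = data_off + payload;      /* MIC appended by ..._encrypt */

        printf("PN at %d, data at %d, MIC at %d, total %d bytes\n",
               pn_off, data_off, mic_off, mic_off + CCMP_MIC_LEN);
        return 0;
}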
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index d4f9164be1a..2e34f29b795 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -59,8 +59,24 @@ struct ieee80211_tkip_data {
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16], tx_hdr[16];
+
+ unsigned long flags;
};
+static unsigned long ieee80211_tkip_set_flags(unsigned long flags, void *priv)
+{
+ struct ieee80211_tkip_data *_priv = priv;
+ unsigned long old_flags = _priv->flags;
+ _priv->flags = flags;
+ return old_flags;
+}
+
+static unsigned long ieee80211_tkip_get_flags(void *priv)
+{
+ struct ieee80211_tkip_data *_priv = priv;
+ return _priv->flags;
+}
+
static void *ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
@@ -69,6 +85,7 @@ static void *ieee80211_tkip_init(int key_idx)
if (priv == NULL)
goto fail;
memset(priv, 0, sizeof(*priv));
+
priv->key_idx = key_idx;
priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0);
@@ -255,25 +272,27 @@ static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK,
#endif
}
-static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
int len;
- u8 rc4key[16], *pos, *icv;
- struct ieee80211_hdr *hdr;
+ u8 *rc4key, *pos, *icv;
+ struct ieee80211_hdr_4addr *hdr;
u32 crc;
- struct scatterlist sg;
- if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
- skb->len < hdr_len)
- return -1;
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
+
+ if (skb_headroom(skb) < 8 || skb->len < hdr_len)
+ return NULL;
- hdr = (struct ieee80211_hdr *)skb->data;
if (!tkey->tx_phase1_done) {
tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
tkey->tx_iv32);
tkey->tx_phase1_done = 1;
}
+ rc4key = kmalloc(16, GFP_ATOMIC);
+ if (!rc4key)
+ return NULL;
tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
len = skb->len - hdr_len;
@@ -282,9 +301,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += hdr_len;
icv = skb_put(skb, 4);
- *pos++ = rc4key[0];
- *pos++ = rc4key[1];
- *pos++ = rc4key[2];
+ *pos++ = *rc4key;
+ *pos++ = *(rc4key + 1);
+ *pos++ = *(rc4key + 2);
*pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ;
*pos++ = tkey->tx_iv32 & 0xff;
*pos++ = (tkey->tx_iv32 >> 8) & 0xff;
@@ -297,6 +316,38 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
+ return rc4key;
+}
+
+static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct ieee80211_tkip_data *tkey = priv;
+ int len;
+ const u8 *rc4key;
+ u8 *pos;
+ struct scatterlist sg;
+
+ if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
+ if (net_ratelimit()) {
+ struct ieee80211_hdr_4addr *hdr =
+ (struct ieee80211_hdr_4addr *)skb->data;
+ printk(KERN_DEBUG "TKIP countermeasures: dropped "
+ "TX packet to " MAC_FMT "\n",
+ MAC_ARG(hdr->addr1));
+ }
+ return -1;
+ }
+
+ if (skb_tailroom(skb) < 4 || skb->len < hdr_len)
+ return -1;
+
+ len = skb->len - hdr_len;
+ pos = skb->data + hdr_len;
+
+ rc4key = ieee80211_tkip_hdr(skb, hdr_len, priv);
+ if (!rc4key)
+ return -1;
+
crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
@@ -319,16 +370,26 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 keyidx, *pos;
u32 iv32;
u16 iv16;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
u8 icv[4];
u32 crc;
struct scatterlist sg;
int plen;
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
+
+ if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "TKIP countermeasures: dropped "
+ "received packet from " MAC_FMT "\n",
+ MAC_ARG(hdr->addr2));
+ }
+ return -1;
+ }
+
if (skb->len < hdr_len + 8 + 4)
return -1;
- hdr = (struct ieee80211_hdr *)skb->data;
pos = skb->data + hdr_len;
keyidx = pos[3];
if (!(keyidx & (1 << 5))) {
@@ -441,9 +502,9 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
{
- struct ieee80211_hdr *hdr11;
+ struct ieee80211_hdr_4addr *hdr11;
- hdr11 = (struct ieee80211_hdr *)skb->data;
+ hdr11 = (struct ieee80211_hdr_4addr *)skb->data;
switch (le16_to_cpu(hdr11->frame_ctl) &
(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
case IEEE80211_FCTL_TODS:
@@ -490,9 +551,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
return 0;
}
-#if WIRELESS_EXT >= 18
static void ieee80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr *hdr, int keyidx)
+ struct ieee80211_hdr_4addr *hdr,
+ int keyidx)
{
union iwreq_data wrqu;
struct iw_michaelmicfailure ev;
@@ -510,28 +571,6 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
wrqu.data.length = sizeof(ev);
wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
}
-#elif WIRELESS_EXT >= 15
-static void ieee80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr *hdr, int keyidx)
-{
- union iwreq_data wrqu;
- char buf[128];
-
- /* TODO: needed parameters: count, keyid, key type, TSC */
- sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
- MAC_FMT ")", keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni",
- MAC_ARG(hdr->addr2));
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
-}
-#else /* WIRELESS_EXT >= 15 */
-static inline void ieee80211_michael_mic_failure(struct net_device *dev,
- struct ieee80211_hdr *hdr,
- int keyidx)
-{
-}
-#endif /* WIRELESS_EXT >= 15 */
static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
int hdr_len, void *priv)
@@ -547,8 +586,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
return -1;
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
- struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr_4addr *hdr;
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
printk(KERN_DEBUG "%s: Michael MIC verification failed for "
"MSDU from " MAC_FMT " keyidx=%d\n",
skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2),
@@ -654,19 +693,22 @@ static char *ieee80211_tkip_print_stats(char *p, void *priv)
}
static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
- .name = "TKIP",
- .init = ieee80211_tkip_init,
- .deinit = ieee80211_tkip_deinit,
- .encrypt_mpdu = ieee80211_tkip_encrypt,
- .decrypt_mpdu = ieee80211_tkip_decrypt,
- .encrypt_msdu = ieee80211_michael_mic_add,
- .decrypt_msdu = ieee80211_michael_mic_verify,
- .set_key = ieee80211_tkip_set_key,
- .get_key = ieee80211_tkip_get_key,
- .print_stats = ieee80211_tkip_print_stats,
- .extra_prefix_len = 4 + 4, /* IV + ExtIV */
- .extra_postfix_len = 8 + 4, /* MIC + ICV */
- .owner = THIS_MODULE,
+ .name = "TKIP",
+ .init = ieee80211_tkip_init,
+ .deinit = ieee80211_tkip_deinit,
+ .encrypt_mpdu = ieee80211_tkip_encrypt,
+ .decrypt_mpdu = ieee80211_tkip_decrypt,
+ .encrypt_msdu = ieee80211_michael_mic_add,
+ .decrypt_msdu = ieee80211_michael_mic_verify,
+ .set_key = ieee80211_tkip_set_key,
+ .get_key = ieee80211_tkip_get_key,
+ .print_stats = ieee80211_tkip_print_stats,
+ .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */
+ .extra_mpdu_postfix_len = 4, /* ICV */
+ .extra_msdu_postfix_len = 8, /* MIC */
+ .get_flags = ieee80211_tkip_get_flags,
+ .set_flags = ieee80211_tkip_set_flags,
+ .owner = THIS_MODULE,
};
static int __init ieee80211_crypto_tkip_init(void)
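
TKIP countermeasures now live in a per-key flag rather than a device-wide tkip_countermeasures field: the encrypt/decrypt paths drop frames while IEEE80211_CRYPTO_TKIP_COUNTERMEASURES is set in tkey->flags, and the flag is toggled through the new get_flags/set_flags ops. A hedged, uncompiled sketch of how a MIC-failure handler might turn the flag on (the helper name is hypothetical; the ops signatures match the functions added above):

static void example_enable_tkip_countermeasures(struct ieee80211_device *ieee)
{
        struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
        unsigned long flags;

        if (!crypt || !crypt->ops || !crypt->ops->set_flags)
                return;

        /* read-modify-write through the new ops; set_flags returns the
         * previous value, which is ignored here */
        flags = crypt->ops->get_flags(crypt->priv);
        crypt->ops->set_flags(flags | IEEE80211_CRYPTO_TKIP_COUNTERMEASURES,
                              crypt->priv);
}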
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index b4d2514a090..7c08ed2f262 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -229,19 +229,19 @@ static char *prism2_wep_print_stats(char *p, void *priv)
}
static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
- .name = "WEP",
- .init = prism2_wep_init,
- .deinit = prism2_wep_deinit,
- .encrypt_mpdu = prism2_wep_encrypt,
- .decrypt_mpdu = prism2_wep_decrypt,
- .encrypt_msdu = NULL,
- .decrypt_msdu = NULL,
- .set_key = prism2_wep_set_key,
- .get_key = prism2_wep_get_key,
- .print_stats = prism2_wep_print_stats,
- .extra_prefix_len = 4, /* IV */
- .extra_postfix_len = 4, /* ICV */
- .owner = THIS_MODULE,
+ .name = "WEP",
+ .init = prism2_wep_init,
+ .deinit = prism2_wep_deinit,
+ .encrypt_mpdu = prism2_wep_encrypt,
+ .decrypt_mpdu = prism2_wep_decrypt,
+ .encrypt_msdu = NULL,
+ .decrypt_msdu = NULL,
+ .set_key = prism2_wep_set_key,
+ .get_key = prism2_wep_get_key,
+ .print_stats = prism2_wep_print_stats,
+ .extra_mpdu_prefix_len = 4, /* IV */
+ .extra_mpdu_postfix_len = 4, /* ICV */
+ .owner = THIS_MODULE,
};
static int __init ieee80211_crypto_wep_init(void)
diff --git a/net/ieee80211/ieee80211_geo.c b/net/ieee80211/ieee80211_geo.c
new file mode 100644
index 00000000000..c4b54ef8f6d
--- /dev/null
+++ b/net/ieee80211/ieee80211_geo.c
@@ -0,0 +1,141 @@
+/******************************************************************************
+
+ Copyright(c) 2005 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ The full GNU General Public License is included in this distribution in the
+ file called LICENSE.
+
+ Contact Information:
+ James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#include <linux/compiler.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <asm/uaccess.h>
+
+#include <net/ieee80211.h>
+
+int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
+{
+ int i;
+
+ /* Driver needs to initialize the geography map before using
+ * these helper functions */
+ BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
+
+ if (ieee->freq_band & IEEE80211_24GHZ_BAND)
+ for (i = 0; i < ieee->geo.bg_channels; i++)
+ /* NOTE: If G mode is currently supported but
+ * this is a B only channel, we don't see it
+ * as valid. */
+ if ((ieee->geo.bg[i].channel == channel) &&
+ (!(ieee->mode & IEEE_G) ||
+ !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
+ return IEEE80211_24GHZ_BAND;
+
+ if (ieee->freq_band & IEEE80211_52GHZ_BAND)
+ for (i = 0; i < ieee->geo.a_channels; i++)
+ if (ieee->geo.a[i].channel == channel)
+ return IEEE80211_52GHZ_BAND;
+
+ return 0;
+}
+
+int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel)
+{
+ int i;
+
+ /* Driver needs to initialize the geography map before using
+ * these helper functions */
+ BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
+
+ if (ieee->freq_band & IEEE80211_24GHZ_BAND)
+ for (i = 0; i < ieee->geo.bg_channels; i++)
+ if (ieee->geo.bg[i].channel == channel)
+ return i;
+
+ if (ieee->freq_band & IEEE80211_52GHZ_BAND)
+ for (i = 0; i < ieee->geo.a_channels; i++)
+ if (ieee->geo.a[i].channel == channel)
+ return i;
+
+ return -1;
+}
+
+u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq)
+{
+ int i;
+
+ /* Driver needs to initialize the geography map before using
+ * these helper functions */
+ BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
+
+ freq /= 100000;
+
+ if (ieee->freq_band & IEEE80211_24GHZ_BAND)
+ for (i = 0; i < ieee->geo.bg_channels; i++)
+ if (ieee->geo.bg[i].freq == freq)
+ return ieee->geo.bg[i].channel;
+
+ if (ieee->freq_band & IEEE80211_52GHZ_BAND)
+ for (i = 0; i < ieee->geo.a_channels; i++)
+ if (ieee->geo.a[i].freq == freq)
+ return ieee->geo.a[i].channel;
+
+ return 0;
+}
+
+int ieee80211_set_geo(struct ieee80211_device *ieee,
+ const struct ieee80211_geo *geo)
+{
+ memcpy(ieee->geo.name, geo->name, 3);
+ ieee->geo.name[3] = '\0';
+ ieee->geo.bg_channels = geo->bg_channels;
+ ieee->geo.a_channels = geo->a_channels;
+ memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
+ sizeof(struct ieee80211_channel));
+ memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
+ sizeof(struct ieee80211_channel));
+ return 0;
+}
+
+const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee)
+{
+ return &ieee->geo;
+}
+
+EXPORT_SYMBOL(ieee80211_is_valid_channel);
+EXPORT_SYMBOL(ieee80211_freq_to_channel);
+EXPORT_SYMBOL(ieee80211_channel_to_index);
+EXPORT_SYMBOL(ieee80211_set_geo);
+EXPORT_SYMBOL(ieee80211_get_geo);
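
These helpers only work after the driver has installed its channel map with ieee80211_set_geo(); the BUG_ON()s above enforce that. A hedged, uncompiled sketch of driver-side usage with an illustrative two-channel 2.4 GHz table (the struct ieee80211_geo / ieee80211_channel member names are taken from the code above; everything else is made up for the example):

static const struct ieee80211_geo example_geo = {
        .name        = "US ",
        .bg_channels = 2,
        .bg = {
                { .channel = 1, .freq = 2412 },
                { .channel = 6, .freq = 2437 },
        },
};

static void example_init_geo(struct ieee80211_device *ieee)
{
        ieee80211_set_geo(ieee, &example_geo);

        if (ieee80211_is_valid_channel(ieee, 6))
                printk(KERN_DEBUG "channel 6 is usable in this geography\n");
        printk(KERN_DEBUG "channel 6 is table index %d\n",
               ieee80211_channel_to_index(ieee, 6));
}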
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 6059e9e3712..f66d792cd20 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -1,6 +1,6 @@
/*******************************************************************************
- Copyright(c) 2004 Intel Corporation. All rights reserved.
+ Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
Portions of this file are based on the WEP enablement code provided by the
Host AP project hostap-drivers v0.1.3
@@ -53,12 +53,15 @@
#include <net/ieee80211.h>
-MODULE_DESCRIPTION("802.11 data/management/control stack");
-MODULE_AUTHOR
- ("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
-MODULE_LICENSE("GPL");
+#define DRV_DESCRIPTION "802.11 data/management/control stack"
+#define DRV_NAME "ieee80211"
+#define DRV_VERSION IEEE80211_VERSION
+#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
-#define DRV_NAME "ieee80211"
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
{
@@ -126,26 +129,34 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
/* Default fragmentation threshold is maximum payload size */
ieee->fts = DEFAULT_FTS;
+ ieee->rts = DEFAULT_FTS;
ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
ieee->open_wep = 1;
/* Default to enabling full open WEP with host based encrypt/decrypt */
ieee->host_encrypt = 1;
ieee->host_decrypt = 1;
+ ieee->host_mc_decrypt = 1;
+
+ /* Host fragmentation in Open mode. Default is enabled.
+ * Note: host fragmentation is always enabled if host encryption
+ * is enabled. For cards that can do hardware encryption, they must do
+ * hardware fragmentation as well. So we don't need a variable
+ * like host_enc_frag. */
+ ieee->host_open_frag = 1;
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
INIT_LIST_HEAD(&ieee->crypt_deinit_list);
init_timer(&ieee->crypt_deinit_timer);
ieee->crypt_deinit_timer.data = (unsigned long)ieee;
ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;
+ ieee->crypt_quiesced = 0;
spin_lock_init(&ieee->lock);
ieee->wpa_enabled = 0;
- ieee->tkip_countermeasures = 0;
ieee->drop_unencrypted = 0;
ieee->privacy_invoked = 0;
- ieee->ieee802_1x = 1;
return dev;
@@ -161,6 +172,7 @@ void free_ieee80211(struct net_device *dev)
int i;
+ ieee80211_crypt_quiescing(ieee);
del_timer_sync(&ieee->crypt_deinit_timer);
ieee80211_crypt_deinit_entries(ieee, 1);
@@ -195,38 +207,26 @@ static int show_debug_level(char *page, char **start, off_t offset,
static int store_debug_level(struct file *file, const char __user * buffer,
unsigned long count, void *data)
{
- char buf[] = "0x00000000";
- char *p = (char *)buf;
+ char buf[] = "0x00000000\n";
+ unsigned long len = min((unsigned long)sizeof(buf) - 1, count);
unsigned long val;
- if (count > sizeof(buf) - 1)
- count = sizeof(buf) - 1;
-
- if (copy_from_user(buf, buffer, count))
+ if (copy_from_user(buf, buffer, len))
return count;
- buf[count] = 0;
- /*
- * what a FPOS... What, sscanf(buf, "%i", &val) would be too
- * scary?
- */
- if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
- p++;
- if (p[0] == 'x' || p[0] == 'X')
- p++;
- val = simple_strtoul(p, &p, 16);
- } else
- val = simple_strtoul(p, &p, 10);
- if (p == buf)
+ buf[len] = 0;
+ if (sscanf(buf, "%li", &val) != 1)
printk(KERN_INFO DRV_NAME
": %s is not in hex or decimal form.\n", buf);
else
ieee80211_debug_level = val;
- return strlen(buf);
+ return strnlen(buf, len);
}
+#endif /* CONFIG_IEEE80211_DEBUG */
static int __init ieee80211_init(void)
{
+#ifdef CONFIG_IEEE80211_DEBUG
struct proc_dir_entry *e;
ieee80211_debug_level = debug;
@@ -246,26 +246,33 @@ static int __init ieee80211_init(void)
e->read_proc = show_debug_level;
e->write_proc = store_debug_level;
e->data = NULL;
+#endif /* CONFIG_IEEE80211_DEBUG */
+
+ printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
return 0;
}
static void __exit ieee80211_exit(void)
{
+#ifdef CONFIG_IEEE80211_DEBUG
if (ieee80211_proc) {
remove_proc_entry("debug_level", ieee80211_proc);
remove_proc_entry(DRV_NAME, proc_net);
ieee80211_proc = NULL;
}
+#endif /* CONFIG_IEEE80211_DEBUG */
}
+#ifdef CONFIG_IEEE80211_DEBUG
#include <linux/moduleparam.h>
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
+#endif /* CONFIG_IEEE80211_DEBUG */
module_exit(ieee80211_exit);
module_init(ieee80211_init);
-#endif
const char *escape_essid(const char *essid, u8 essid_len)
{
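
store_debug_level() above now leans on sscanf() with "%li", which accepts decimal and 0x-prefixed hex in a single call and stops cleanly at the trailing newline that echo writes into /proc (hence the buffer keeping room for one). A standalone, user-space illustration of that parsing behaviour (not from the patch):

#include <stdio.h>

int main(void)
{
        const char *inputs[] = { "255", "0xff", "garbage" };
        long val;
        int i;

        for (i = 0; i < 3; i++) {
                if (sscanf(inputs[i], "%li", &val) != 1)
                        printf("%s: not in hex or decimal form\n", inputs[i]);
                else
                        printf("%s: parsed as %ld\n", inputs[i], val);
        }
        return 0;
}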
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index f7dcd854139..ce694cf5c16 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -5,7 +5,7 @@
* Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
* <jkmaline@cc.hut.fi>
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
- * Copyright (c) 2004, Intel Corporation
+ * Copyright (c) 2004-2005, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -87,7 +87,7 @@ static struct ieee80211_frag_entry *ieee80211_frag_cache_find(struct
/* Called only as a tasklet (software IRQ) */
static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee,
- struct ieee80211_hdr *hdr)
+ struct ieee80211_hdr_4addr *hdr)
{
struct sk_buff *skb = NULL;
u16 sc;
@@ -101,7 +101,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee,
if (frag == 0) {
/* Reserve enough space to fit maximum frame length */
skb = dev_alloc_skb(ieee->dev->mtu +
- sizeof(struct ieee80211_hdr) +
+ sizeof(struct ieee80211_hdr_4addr) +
8 /* LLC */ +
2 /* alignment */ +
8 /* WEP */ + ETH_ALEN /* WDS */ );
@@ -138,7 +138,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee,
/* Called only as a tasklet (software IRQ) */
static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
- struct ieee80211_hdr *hdr)
+ struct ieee80211_hdr_4addr *hdr)
{
u16 sc;
unsigned int seq;
@@ -176,7 +176,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
ieee->dev->name);
return 0;
/*
- hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr *)
+ hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
skb->data);*/
}
@@ -232,13 +232,13 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
{
struct net_device *dev = ieee->dev;
u16 fc, ethertype;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_3addr *hdr;
u8 *pos;
if (skb->len < 24)
return 0;
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr_3addr *)skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
/* check that the frame is unicast frame to us */
@@ -271,26 +271,15 @@ static inline int
ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_crypt_data *crypt)
{
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_3addr *hdr;
int res, hdrlen;
if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
return 0;
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr_3addr *)skb->data;
hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
-#ifdef CONFIG_IEEE80211_CRYPT_TKIP
- if (ieee->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
- "received packet from " MAC_FMT "\n",
- ieee->dev->name, MAC_ARG(hdr->addr2));
- }
- return -1;
- }
-#endif
-
atomic_inc(&crypt->refcnt);
res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
@@ -314,13 +303,13 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee,
struct sk_buff *skb, int keyidx,
struct ieee80211_crypt_data *crypt)
{
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_3addr *hdr;
int res, hdrlen;
if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
return 0;
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr_3addr *)skb->data;
hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
@@ -343,7 +332,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr_4addr *hdr;
size_t hdrlen;
u16 fc, type, stype, sc;
struct net_device_stats *stats;
@@ -363,7 +352,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_crypt_data *crypt = NULL;
int keyidx = 0;
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
stats = &ieee->stats;
if (skb->len < 10) {
@@ -378,35 +367,51 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
frag = WLAN_GET_SEQ_FRAG(sc);
hdrlen = ieee80211_get_hdrlen(fc);
-#ifdef NOT_YET
-#if WIRELESS_EXT > 15
/* Put this code here so that we avoid duplicating it in all
* Rx paths. - Jean II */
#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
/* If spy monitoring on */
- if (iface->spy_data.spy_number > 0) {
+ if (ieee->spy_data.spy_number > 0) {
struct iw_quality wstats;
- wstats.level = rx_stats->signal;
- wstats.noise = rx_stats->noise;
- wstats.updated = 6; /* No qual value */
+
+ wstats.updated = 0;
+ if (rx_stats->mask & IEEE80211_STATMASK_RSSI) {
+ wstats.level = rx_stats->rssi;
+ wstats.updated |= IW_QUAL_LEVEL_UPDATED;
+ } else
+ wstats.updated |= IW_QUAL_LEVEL_INVALID;
+
+ if (rx_stats->mask & IEEE80211_STATMASK_NOISE) {
+ wstats.noise = rx_stats->noise;
+ wstats.updated |= IW_QUAL_NOISE_UPDATED;
+ } else
+ wstats.updated |= IW_QUAL_NOISE_INVALID;
+
+ if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) {
+ wstats.qual = rx_stats->signal;
+ wstats.updated |= IW_QUAL_QUAL_UPDATED;
+ } else
+ wstats.updated |= IW_QUAL_QUAL_INVALID;
+
/* Update spy records */
- wireless_spy_update(dev, hdr->addr2, &wstats);
+ wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
}
#endif /* IW_WIRELESS_SPY */
-#endif /* WIRELESS_EXT > 15 */
+
+#ifdef NOT_YET
hostap_update_rx_stats(local->ap, hdr, rx_stats);
#endif
-#if WIRELESS_EXT > 15
if (ieee->iw_mode == IW_MODE_MONITOR) {
ieee80211_monitor_rx(ieee, skb, rx_stats);
stats->rx_packets++;
stats->rx_bytes += skb->len;
return 1;
}
-#endif
- if (ieee->host_decrypt) {
+ if ((is_multicast_ether_addr(hdr->addr1) ||
+ is_broadcast_ether_addr(hdr->addr2)) ? ieee->host_mc_decrypt :
+ ieee->host_decrypt) {
int idx = 0;
if (skb->len >= hdrlen + 3)
idx = skb->data[hdrlen + 3] >> 6;
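
The host-decrypt path above peeks at the fourth byte after the 802.11 header to pick a key: the KeyID occupies the top two bits of that IV octet, which is what the >> 6 extracts. A standalone check of the bit manipulation:

#include <stdio.h>

int main(void)
{
        /* fourth IV byte with KeyID = 3 in bits 6-7 and ExtIV (bit 5) set */
        unsigned char keyid_octet = 0xe0;
        int idx = keyid_octet >> 6;

        printf("keyidx = %d\n", idx);   /* prints 3 */
        return 0;
}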
@@ -531,6 +536,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* Nullfunc frames may have PS-bit set, so they must be passed to
* hostap_handle_sta_rx() before being dropped here. */
+
+ stype &= ~IEEE80211_STYPE_QOS_DATA;
+
if (stype != IEEE80211_STYPE_DATA &&
stype != IEEE80211_STYPE_DATA_CFACK &&
stype != IEEE80211_STYPE_DATA_CFPOLL &&
@@ -549,7 +557,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
(keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
goto rx_dropped;
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
/* skb: hdr + (possibly fragmented) plaintext payload */
// PR: FIXME: hostap has additional conditions in the "if" below:
@@ -603,7 +611,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* this was the last fragment and the frame will be
* delivered, so remove skb from fragment cache */
skb = frag_skb;
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
ieee80211_frag_cache_invalidate(ieee, hdr);
}
@@ -613,7 +621,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
goto rx_dropped;
- hdr = (struct ieee80211_hdr *)skb->data;
+ hdr = (struct ieee80211_hdr_4addr *)skb->data;
if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) {
if ( /*ieee->ieee802_1x && */
ieee80211_is_eapol_frame(ieee, skb)) {
@@ -755,69 +763,179 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
-static inline int ieee80211_is_ofdm_rate(u8 rate)
+static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
+
+/*
+* Make sure the structure we read from the beacon packet has
+* the right values
+*/
+static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
+ *info_element, int sub_type)
{
- switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
- case IEEE80211_OFDM_RATE_6MB:
- case IEEE80211_OFDM_RATE_9MB:
- case IEEE80211_OFDM_RATE_12MB:
- case IEEE80211_OFDM_RATE_18MB:
- case IEEE80211_OFDM_RATE_24MB:
- case IEEE80211_OFDM_RATE_36MB:
- case IEEE80211_OFDM_RATE_48MB:
- case IEEE80211_OFDM_RATE_54MB:
- return 1;
- }
+
+ if (info_element->qui_subtype != sub_type)
+ return -1;
+ if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
+ return -1;
+ if (info_element->qui_type != QOS_OUI_TYPE)
+ return -1;
+ if (info_element->version != QOS_VERSION_1)
+ return -1;
+
return 0;
}
-static inline int ieee80211_network_init(struct ieee80211_device *ieee,
- struct ieee80211_probe_response
- *beacon,
- struct ieee80211_network *network,
- struct ieee80211_rx_stats *stats)
+/*
+ * Parse a QoS parameter element
+ */
+static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
+ *element_param, struct ieee80211_info_element
+ *info_element)
{
-#ifdef CONFIG_IEEE80211_DEBUG
- char rates_str[64];
- char *p;
-#endif
- struct ieee80211_info_element *info_element;
- u16 left;
- u8 i;
+ int ret = 0;
+ u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;
- /* Pull out fixed field data */
- memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
- network->capability = beacon->capability;
- network->last_scanned = jiffies;
- network->time_stamp[0] = beacon->time_stamp[0];
- network->time_stamp[1] = beacon->time_stamp[1];
- network->beacon_interval = beacon->beacon_interval;
- /* Where to pull this? beacon->listen_interval; */
- network->listen_interval = 0x0A;
- network->rates_len = network->rates_ex_len = 0;
- network->last_associate = 0;
- network->ssid_len = 0;
- network->flags = 0;
- network->atim_window = 0;
+ if ((info_element == NULL) || (element_param == NULL))
+ return -1;
- if (stats->freq == IEEE80211_52GHZ_BAND) {
- /* for A band (No DS info) */
- network->channel = stats->received_channel;
+ if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
+ memcpy(element_param->info_element.qui, info_element->data,
+ info_element->len);
+ element_param->info_element.elementID = info_element->id;
+ element_param->info_element.length = info_element->len;
} else
- network->flags |= NETWORK_HAS_CCK;
+ ret = -1;
+ if (ret == 0)
+ ret = ieee80211_verify_qos_info(&element_param->info_element,
+ QOS_OUI_PARAM_SUB_TYPE);
+ return ret;
+}
- network->wpa_ie_len = 0;
- network->rsn_ie_len = 0;
+/*
+ * Parse a QoS information element
+ */
+static int ieee80211_read_qos_info_element(struct
+ ieee80211_qos_information_element
+ *element_info, struct ieee80211_info_element
+ *info_element)
+{
+ int ret = 0;
+ u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
+
+ if (element_info == NULL)
+ return -1;
+ if (info_element == NULL)
+ return -1;
+
+ if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
+ memcpy(element_info->qui, info_element->data,
+ info_element->len);
+ element_info->elementID = info_element->id;
+ element_info->length = info_element->len;
+ } else
+ ret = -1;
+
+ if (ret == 0)
+ ret = ieee80211_verify_qos_info(element_info,
+ QOS_OUI_INFO_SUB_TYPE);
+ return ret;
+}
+
+/*
+ * Write QoS parameters from the ac parameters.
+ */
+static int ieee80211_qos_convert_ac_to_parameters(struct
+ ieee80211_qos_parameter_info
+ *param_elm, struct
+ ieee80211_qos_parameters
+ *qos_param)
+{
+ int rc = 0;
+ int i;
+ struct ieee80211_qos_ac_parameter *ac_params;
+ u32 txop;
+ u8 cw_min;
+ u8 cw_max;
+
+ for (i = 0; i < QOS_QUEUE_NUM; i++) {
+ ac_params = &(param_elm->ac_params_record[i]);
+
+ qos_param->aifs[i] = (ac_params->aci_aifsn) & 0x0F;
+ qos_param->aifs[i] -= (qos_param->aifs[i] < 2) ? 0 : 2;
+
+ cw_min = ac_params->ecw_min_max & 0x0F;
+ qos_param->cw_min[i] = (u16) ((1 << cw_min) - 1);
+
+ cw_max = (ac_params->ecw_min_max & 0xF0) >> 4;
+ qos_param->cw_max[i] = (u16) ((1 << cw_max) - 1);
+
+ qos_param->flag[i] =
+ (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
+
+ txop = le16_to_cpu(ac_params->tx_op_limit) * 32;
+ qos_param->tx_op_limit[i] = (u16) txop;
+ }
+ return rc;
+}
+
+/*
+ * We have a generic data element which may contain a QoS information or
+ * parameters element. Check the information element length to decide
+ * which type to read.
+ */
+static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
+ *info_element,
+ struct ieee80211_network *network)
+{
+ int rc = 0;
+ struct ieee80211_qos_parameters *qos_param = NULL;
+ struct ieee80211_qos_information_element qos_info_element;
+
+ rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
+
+ if (rc == 0) {
+ network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
+ network->flags |= NETWORK_HAS_QOS_INFORMATION;
+ } else {
+ struct ieee80211_qos_parameter_info param_element;
+
+ rc = ieee80211_read_qos_param_element(&param_element,
+ info_element);
+ if (rc == 0) {
+ qos_param = &(network->qos_data.parameters);
+ ieee80211_qos_convert_ac_to_parameters(&param_element,
+ qos_param);
+ network->flags |= NETWORK_HAS_QOS_PARAMETERS;
+ network->qos_data.param_count =
+ param_element.info_element.ac_info & 0x0F;
+ }
+ }
+
+ if (rc == 0) {
+ IEEE80211_DEBUG_QOS("QoS is supported\n");
+ network->qos_data.supported = 1;
+ }
+ return rc;
+}
+
+static int ieee80211_parse_info_param(struct ieee80211_info_element
+ *info_element, u16 length,
+ struct ieee80211_network *network)
+{
+ u8 i;
+#ifdef CONFIG_IEEE80211_DEBUG
+ char rates_str[64];
+ char *p;
+#endif
- info_element = &beacon->info_element;
- left = stats->len - ((void *)info_element - (void *)beacon);
- while (left >= sizeof(struct ieee80211_info_element_hdr)) {
- if (sizeof(struct ieee80211_info_element_hdr) +
- info_element->len > left) {
- IEEE80211_DEBUG_SCAN
- ("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n",
- info_element->len +
- sizeof(struct ieee80211_info_element), left);
+ while (length >= sizeof(*info_element)) {
+ if (sizeof(*info_element) + info_element->len > length) {
+ IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
+ "info_element->len + 2 > left : "
+ "info_element->len+2=%zd left=%d, id=%d.\n",
+ info_element->len +
+ sizeof(*info_element),
+ length, info_element->id);
return 1;
}
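
The AC-parameter conversion above expands the packed ECWmin/ECWmax exponents into real contention-window sizes (CW = 2^ECW - 1) and turns the TXOP limit, carried in the element as 32-microsecond units, into microseconds. A standalone check of that arithmetic with illustrative WMM-style sample values:

#include <stdio.h>

int main(void)
{
        unsigned char ecw_min_max = 0xa4;       /* ECWmin = 4, ECWmax = 10 */
        unsigned short tx_op_limit = 94;        /* 94 * 32 us = 3008 us    */

        unsigned char cw_min_exp = ecw_min_max & 0x0F;
        unsigned char cw_max_exp = (ecw_min_max & 0xF0) >> 4;

        printf("CWmin = %u\n", (1u << cw_min_exp) - 1);   /* 15   */
        printf("CWmax = %u\n", (1u << cw_max_exp) - 1);   /* 1023 */
        printf("TXOP  = %u us\n", tx_op_limit * 32);      /* 3008 */
        return 0;
}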
@@ -837,7 +955,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
memset(network->ssid + network->ssid_len, 0,
IW_ESSID_MAX_SIZE - network->ssid_len);
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n",
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n",
network->ssid, network->ssid_len);
break;
@@ -845,15 +963,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
#ifdef CONFIG_IEEE80211_DEBUG
p = rates_str;
#endif
- network->rates_len =
- min(info_element->len, MAX_RATES_LENGTH);
+ network->rates_len = min(info_element->len,
+ MAX_RATES_LENGTH);
for (i = 0; i < network->rates_len; i++) {
network->rates[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p,
- sizeof(rates_str) - (p -
- rates_str),
- "%02X ", network->rates[i]);
+ p += snprintf(p, sizeof(rates_str) -
+ (p - rates_str), "%02X ",
+ network->rates[i]);
#endif
if (ieee80211_is_ofdm_rate
(info_element->data[i])) {
@@ -865,7 +982,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
}
}
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES: '%s' (%d)\n",
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n",
rates_str, network->rates_len);
break;
@@ -873,15 +990,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
#ifdef CONFIG_IEEE80211_DEBUG
p = rates_str;
#endif
- network->rates_ex_len =
- min(info_element->len, MAX_RATES_EX_LENGTH);
+ network->rates_ex_len = min(info_element->len,
+ MAX_RATES_EX_LENGTH);
for (i = 0; i < network->rates_ex_len; i++) {
network->rates_ex[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p,
- sizeof(rates_str) - (p -
- rates_str),
- "%02X ", network->rates[i]);
+ p += snprintf(p, sizeof(rates_str) -
+ (p - rates_str), "%02X ",
+ network->rates[i]);
#endif
if (ieee80211_is_ofdm_rate
(info_element->data[i])) {
@@ -893,40 +1009,51 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
}
}
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
rates_str, network->rates_ex_len);
break;
case MFIE_TYPE_DS_SET:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n",
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n",
info_element->data[0]);
- if (stats->freq == IEEE80211_24GHZ_BAND)
- network->channel = info_element->data[0];
+ network->channel = info_element->data[0];
break;
case MFIE_TYPE_FH_SET:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n");
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n");
break;
case MFIE_TYPE_CF_SET:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_CF_SET: ignored\n");
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n");
break;
case MFIE_TYPE_TIM:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_TIM: ignored\n");
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: ignored\n");
+ break;
+
+ case MFIE_TYPE_ERP_INFO:
+ network->erp_value = info_element->data[0];
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
+ network->erp_value);
break;
case MFIE_TYPE_IBSS_SET:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: ignored\n");
+ network->atim_window = info_element->data[0];
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n",
+ network->atim_window);
break;
case MFIE_TYPE_CHALLENGE:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_CHALLENGE: ignored\n");
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n");
break;
case MFIE_TYPE_GENERIC:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n",
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n",
info_element->len);
+ if (!ieee80211_parse_qos_info_param_IE(info_element,
+ network))
+ break;
+
if (info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x50 &&
@@ -940,7 +1067,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
break;
case MFIE_TYPE_RSN:
- IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n",
+ IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n",
info_element->len);
network->rsn_ie_len = min(info_element->len + 2,
MAX_WPA_IE_LEN);
@@ -948,18 +1075,127 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee,
network->rsn_ie_len);
break;
+ case MFIE_TYPE_QOS_PARAMETER:
+ printk(KERN_ERR
+ "QoS Error need to parse QOS_PARAMETER IE\n");
+ break;
+
default:
- IEEE80211_DEBUG_SCAN("unsupported IE %d\n",
+ IEEE80211_DEBUG_MGMT("unsupported IE %d\n",
info_element->id);
break;
}
- left -= sizeof(struct ieee80211_info_element_hdr) +
- info_element->len;
- info_element = (struct ieee80211_info_element *)
- &info_element->data[info_element->len];
+ length -= sizeof(*info_element) + info_element->len;
+ info_element =
+ (struct ieee80211_info_element *)&info_element->
+ data[info_element->len];
+ }
+
+ return 0;
+}
+
+static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response
+ *frame, struct ieee80211_rx_stats *stats)
+{
+ struct ieee80211_network network_resp;
+ struct ieee80211_network *network = &network_resp;
+ struct net_device *dev = ieee->dev;
+
+ network->flags = 0;
+ network->qos_data.active = 0;
+ network->qos_data.supported = 0;
+ network->qos_data.param_count = 0;
+ network->qos_data.old_param_count = 0;
+
+ //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF);
+ network->atim_window = le16_to_cpu(frame->aid);
+ network->listen_interval = le16_to_cpu(frame->status);
+ memcpy(network->bssid, frame->header.addr3, ETH_ALEN);
+ network->capability = le16_to_cpu(frame->capability);
+ network->last_scanned = jiffies;
+ network->rates_len = network->rates_ex_len = 0;
+ network->last_associate = 0;
+ network->ssid_len = 0;
+ network->erp_value =
+ (network->capability & WLAN_CAPABILITY_IBSS) ? 0x3 : 0x0;
+
+ if (stats->freq == IEEE80211_52GHZ_BAND) {
+ /* for A band (No DS info) */
+ network->channel = stats->received_channel;
+ } else
+ network->flags |= NETWORK_HAS_CCK;
+
+ network->wpa_ie_len = 0;
+ network->rsn_ie_len = 0;
+
+ if (ieee80211_parse_info_param
+ (frame->info_element, stats->len - sizeof(*frame), network))
+ return 1;
+
+ network->mode = 0;
+ if (stats->freq == IEEE80211_52GHZ_BAND)
+ network->mode = IEEE_A;
+ else {
+ if (network->flags & NETWORK_HAS_OFDM)
+ network->mode |= IEEE_G;
+ if (network->flags & NETWORK_HAS_CCK)
+ network->mode |= IEEE_B;
}
+ if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
+ network->flags |= NETWORK_EMPTY_ESSID;
+
+ memcpy(&network->stats, stats, sizeof(network->stats));
+
+ if (ieee->handle_assoc_response != NULL)
+ ieee->handle_assoc_response(dev, frame, network);
+
+ return 0;
+}
+
+/***************************************************/
+
+static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response
+ *beacon,
+ struct ieee80211_network *network,
+ struct ieee80211_rx_stats *stats)
+{
+ network->qos_data.active = 0;
+ network->qos_data.supported = 0;
+ network->qos_data.param_count = 0;
+ network->qos_data.old_param_count = 0;
+
+ /* Pull out fixed field data */
+ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
+ network->capability = le16_to_cpu(beacon->capability);
+ network->last_scanned = jiffies;
+ network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
+ network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
+ network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
+ /* Where to pull this? beacon->listen_interval; */
+ network->listen_interval = 0x0A;
+ network->rates_len = network->rates_ex_len = 0;
+ network->last_associate = 0;
+ network->ssid_len = 0;
+ network->flags = 0;
+ network->atim_window = 0;
+ network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
+ 0x3 : 0x0;
+
+ if (stats->freq == IEEE80211_52GHZ_BAND) {
+ /* for A band (No DS info) */
+ network->channel = stats->received_channel;
+ } else
+ network->flags |= NETWORK_HAS_CCK;
+
+ network->wpa_ie_len = 0;
+ network->rsn_ie_len = 0;
+
+ if (ieee80211_parse_info_param
+ (beacon->info_element, stats->len - sizeof(*beacon), network))
+ return 1;
+
network->mode = 0;
if (stats->freq == IEEE80211_52GHZ_BAND)
network->mode = IEEE_A;
@@ -1002,6 +1238,9 @@ static inline int is_same_network(struct ieee80211_network *src,
static inline void update_network(struct ieee80211_network *dst,
struct ieee80211_network *src)
{
+ int qos_active;
+ u8 old_param;
+
memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats));
dst->capability = src->capability;
memcpy(dst->rates, src->rates, src->rates_len);
@@ -1017,6 +1256,7 @@ static inline void update_network(struct ieee80211_network *dst,
dst->beacon_interval = src->beacon_interval;
dst->listen_interval = src->listen_interval;
dst->atim_window = src->atim_window;
+ dst->erp_value = src->erp_value;
memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
dst->wpa_ie_len = src->wpa_ie_len;
@@ -1024,22 +1264,48 @@ static inline void update_network(struct ieee80211_network *dst,
dst->rsn_ie_len = src->rsn_ie_len;
dst->last_scanned = jiffies;
+ qos_active = src->qos_data.active;
+ old_param = dst->qos_data.old_param_count;
+ if (dst->flags & NETWORK_HAS_QOS_MASK)
+ memcpy(&dst->qos_data, &src->qos_data,
+ sizeof(struct ieee80211_qos_data));
+ else {
+ dst->qos_data.supported = src->qos_data.supported;
+ dst->qos_data.param_count = src->qos_data.param_count;
+ }
+
+ if (dst->qos_data.supported == 1) {
+ if (dst->ssid_len)
+ IEEE80211_DEBUG_QOS
+ ("QoS the network %s is QoS supported\n",
+ dst->ssid);
+ else
+ IEEE80211_DEBUG_QOS
+ ("QoS the network is QoS supported\n");
+ }
+ dst->qos_data.active = qos_active;
+ dst->qos_data.old_param_count = old_param;
+
/* dst->last_associate is not overwritten */
}
+static inline int is_beacon(int fc)
+{
+ return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON);
+}
+
static inline void ieee80211_process_probe_response(struct ieee80211_device
- *ieee,
- struct
+ *ieee, struct
ieee80211_probe_response
- *beacon,
- struct ieee80211_rx_stats
+ *beacon, struct ieee80211_rx_stats
*stats)
{
+ struct net_device *dev = ieee->dev;
struct ieee80211_network network;
struct ieee80211_network *target;
struct ieee80211_network *oldest = NULL;
#ifdef CONFIG_IEEE80211_DEBUG
- struct ieee80211_info_element *info_element = &beacon->info_element;
+ struct ieee80211_info_element *info_element = beacon->info_element;
#endif
unsigned long flags;
@@ -1070,10 +1336,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device
escape_essid(info_element->data,
info_element->len),
MAC_ARG(beacon->header.addr3),
- WLAN_FC_GET_STYPE(beacon->header.
- frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
+ is_beacon(le16_to_cpu
+ (beacon->header.
+ frame_ctl)) ?
+ "BEACON" : "PROBE RESPONSE");
return;
}
@@ -1122,10 +1388,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device
escape_essid(network.ssid,
network.ssid_len),
MAC_ARG(network.bssid),
- WLAN_FC_GET_STYPE(beacon->header.
- frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
+ is_beacon(le16_to_cpu
+ (beacon->header.
+ frame_ctl)) ?
+ "BEACON" : "PROBE RESPONSE");
#endif
memcpy(target, &network, sizeof(*target));
list_add_tail(&target->list, &ieee->network_list);
@@ -1134,34 +1400,60 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device
escape_essid(target->ssid,
target->ssid_len),
MAC_ARG(target->bssid),
- WLAN_FC_GET_STYPE(beacon->header.
- frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
- "PROBE RESPONSE" : "BEACON");
+ is_beacon(le16_to_cpu
+ (beacon->header.
+ frame_ctl)) ?
+ "BEACON" : "PROBE RESPONSE");
update_network(target, &network);
}
spin_unlock_irqrestore(&ieee->lock, flags);
+
+ if (is_beacon(le16_to_cpu(beacon->header.frame_ctl))) {
+ if (ieee->handle_beacon != NULL)
+ ieee->handle_beacon(dev, beacon, &network);
+ } else {
+ if (ieee->handle_probe_response != NULL)
+ ieee->handle_probe_response(dev, beacon, &network);
+ }
}
void ieee80211_rx_mgt(struct ieee80211_device *ieee,
- struct ieee80211_hdr *header,
+ struct ieee80211_hdr_4addr *header,
struct ieee80211_rx_stats *stats)
{
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+ switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
case IEEE80211_STYPE_ASSOC_RESP:
IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
+ ieee80211_handle_assoc_resp(ieee,
+ (struct ieee80211_assoc_response *)
+ header, stats);
break;
case IEEE80211_STYPE_REASSOC_RESP:
IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
+ break;
+
+ case IEEE80211_STYPE_PROBE_REQ:
+ IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
+
+ if (ieee->handle_probe_request != NULL)
+ ieee->handle_probe_request(ieee->dev,
+ (struct
+ ieee80211_probe_request *)
+ header, stats);
break;
case IEEE80211_STYPE_PROBE_RESP:
IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
IEEE80211_DEBUG_SCAN("Probe response\n");
ieee80211_process_probe_response(ieee,
(struct
@@ -1171,20 +1463,46 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
case IEEE80211_STYPE_BEACON:
IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
IEEE80211_DEBUG_SCAN("Beacon\n");
ieee80211_process_probe_response(ieee,
(struct
ieee80211_probe_response *)
header, stats);
break;
+ case IEEE80211_STYPE_AUTH:
+ IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
+
+ if (ieee->handle_auth != NULL)
+ ieee->handle_auth(ieee->dev,
+ (struct ieee80211_auth *)header);
+ break;
+
+ case IEEE80211_STYPE_DISASSOC:
+ if (ieee->handle_disassoc != NULL)
+ ieee->handle_disassoc(ieee->dev,
+ (struct ieee80211_disassoc *)
+ header);
+ break;
+
+ case IEEE80211_STYPE_DEAUTH:
+ printk("DEAUTH from AP\n");
+ if (ieee->handle_deauth != NULL)
+ ieee->handle_deauth(ieee->dev, (struct ieee80211_auth *)
+ header);
+ break;
default:
IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
IEEE80211_WARNING("%s: Unknown management packet: %d\n",
ieee->dev->name,
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu
+ (header->frame_ctl)));
break;
}
}
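
All the subtype checks in ieee80211_rx_mgt() now run le16_to_cpu() on frame_ctl first: the field is little-endian on the wire, so without the conversion the stype compare only happens to work on little-endian hosts. A standalone sketch, assuming the usual IEEE80211_FCTL_STYPE mask (0x00f0) and IEEE80211_STYPE_BEACON value (0x0080) from ieee80211.h:

#include <stdio.h>

#define FCTL_STYPE   0x00f0
#define STYPE_BEACON 0x0080

static unsigned short le16(const unsigned char *p)
{
        return (unsigned short)(p[0] | (p[1] << 8));    /* le16_to_cpu analogue */
}

int main(void)
{
        unsigned char frame_ctl[2] = { 0x80, 0x00 };    /* beacon, as on the wire */
        unsigned short fc = le16(frame_ctl);

        printf("beacon? %s\n",
               (fc & FCTL_STYPE) == STYPE_BEACON ? "yes" : "no");
        return 0;
}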
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index eed07bbbe6b..95ccbadbf55 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
+ Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
@@ -128,7 +128,7 @@ payload of each frame is reduced to 492 bytes.
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
-static inline int ieee80211_put_snap(u8 * data, u16 h_proto)
+static inline int ieee80211_copy_snap(u8 * data, u16 h_proto)
{
struct ieee80211_snap_hdr *snap;
u8 *oui;
@@ -157,31 +157,14 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
int res;
-#ifdef CONFIG_IEEE80211_CRYPT_TKIP
- struct ieee80211_hdr *header;
-
- if (ieee->tkip_countermeasures &&
- crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
- header = (struct ieee80211_hdr *)frag->data;
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
- "TX packet to " MAC_FMT "\n",
- ieee->dev->name, MAC_ARG(header->addr1));
- }
+ if (crypt == NULL)
return -1;
- }
-#endif
+
/* To encrypt, frame format is:
* IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
-
- // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
- /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
- * call both MSDU and MPDU encryption functions from here. */
atomic_inc(&crypt->refcnt);
res = 0;
- if (crypt->ops->encrypt_msdu)
- res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
- if (res == 0 && crypt->ops->encrypt_mpdu)
+ if (crypt->ops && crypt->ops->encrypt_mpdu)
res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
atomic_dec(&crypt->refcnt);
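
With the TKIP countermeasures check and the MSDU step removed here, ieee80211_encrypt_fragment() is reduced to per-MPDU encryption; ieee80211_xmit() further down now runs encrypt_msdu once over the whole packet before fragmenting. A hedged, uncompiled sketch of the resulting TX ordering (the helper name and fragment array are hypothetical):

static int example_tx_order(struct sk_buff *skb, int hdr_len,
                            struct ieee80211_crypt_data *crypt,
                            struct sk_buff **frags, int nr_frags)
{
        int i;

        if (crypt->ops->encrypt_msdu &&
            crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv) < 0)
                return -1;                      /* whole-MSDU pass first */

        /* ... fragmentation of skb into frags[] happens here ... */

        for (i = 0; i < nr_frags; i++)          /* then one pass per MPDU */
                if (crypt->ops->encrypt_mpdu &&
                    crypt->ops->encrypt_mpdu(frags[i], hdr_len,
                                             crypt->priv) < 0)
                        return -1;
        return 0;
}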
@@ -207,7 +190,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb)
}
static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
- gfp_t gfp_mask)
+ int headroom, gfp_t gfp_mask)
{
struct ieee80211_txb *txb;
int i;
@@ -221,11 +204,13 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
txb->frag_size = txb_size;
for (i = 0; i < nr_frags; i++) {
- txb->fragments[i] = dev_alloc_skb(txb_size);
+ txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
+ gfp_mask);
if (unlikely(!txb->fragments[i])) {
i--;
break;
}
+ skb_reserve(txb->fragments[i], headroom);
}
if (unlikely(i != nr_frags)) {
while (i >= 0)
@@ -236,25 +221,31 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
return txb;
}
-/* SKBs are added to the ieee->tx_queue. */
+/* Incoming skb is converted to a txb which consists of
+ * a block of 802.11 fragment packets (stored as skbs) */
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct ieee80211_txb *txb = NULL;
- struct ieee80211_hdr *frag_hdr;
- int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
+ struct ieee80211_hdr_3addr *frag_hdr;
+ int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
+ rts_required;
unsigned long flags;
struct net_device_stats *stats = &ieee->stats;
- int ether_type, encrypt;
+ int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
int bytes, fc, hdr_len;
struct sk_buff *skb_frag;
- struct ieee80211_hdr header = { /* Ensure zero initialized */
+ struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */
.duration_id = 0,
.seq_ctl = 0
};
u8 dest[ETH_ALEN], src[ETH_ALEN];
-
struct ieee80211_crypt_data *crypt;
+ int priority = skb->priority;
+ int snapped = 0;
+
+ if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
+ return NETDEV_TX_BUSY;
spin_lock_irqsave(&ieee->lock, flags);
@@ -276,7 +267,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
crypt = ieee->crypt[ieee->tx_keyidx];
encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
- ieee->host_encrypt && crypt && crypt->ops;
+ ieee->sec.encrypt;
+
+ host_encrypt = ieee->host_encrypt && encrypt && crypt;
+ host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
+ host_build_iv = ieee->host_build_iv && encrypt && crypt;
if (!encrypt && ieee->ieee802_1x &&
ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
@@ -285,8 +280,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Save source and destination addresses */
- memcpy(&dest, skb->data, ETH_ALEN);
- memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);
+ memcpy(dest, skb->data, ETH_ALEN);
+ memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);
/* Advance the SKB to the start of the payload */
skb_pull(skb, sizeof(struct ethhdr));
@@ -294,7 +289,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
/* Determine total amount of storage required for TXB packets */
bytes = skb->len + SNAP_SIZE + sizeof(u16);
- if (encrypt)
+ if (host_encrypt)
fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
IEEE80211_FCTL_PROTECTED;
else
@@ -302,70 +297,144 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
if (ieee->iw_mode == IW_MODE_INFRA) {
fc |= IEEE80211_FCTL_TODS;
- /* To DS: Addr1 = BSSID, Addr2 = SA,
- Addr3 = DA */
- memcpy(&header.addr1, ieee->bssid, ETH_ALEN);
- memcpy(&header.addr2, &src, ETH_ALEN);
- memcpy(&header.addr3, &dest, ETH_ALEN);
+ /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
+ memcpy(header.addr1, ieee->bssid, ETH_ALEN);
+ memcpy(header.addr2, src, ETH_ALEN);
+ memcpy(header.addr3, dest, ETH_ALEN);
} else if (ieee->iw_mode == IW_MODE_ADHOC) {
- /* not From/To DS: Addr1 = DA, Addr2 = SA,
- Addr3 = BSSID */
- memcpy(&header.addr1, dest, ETH_ALEN);
- memcpy(&header.addr2, src, ETH_ALEN);
- memcpy(&header.addr3, ieee->bssid, ETH_ALEN);
+ /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
+ memcpy(header.addr1, dest, ETH_ALEN);
+ memcpy(header.addr2, src, ETH_ALEN);
+ memcpy(header.addr3, ieee->bssid, ETH_ALEN);
}
header.frame_ctl = cpu_to_le16(fc);
hdr_len = IEEE80211_3ADDR_LEN;
- /* Determine fragmentation size based on destination (multicast
- * and broadcast are not fragmented) */
- if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest))
- frag_size = MAX_FRAG_THRESHOLD;
- else
- frag_size = ieee->fts;
+ /* Encrypt msdu first on the whole data packet. */
+ if ((host_encrypt || host_encrypt_msdu) &&
+ crypt && crypt->ops && crypt->ops->encrypt_msdu) {
+ int res = 0;
+ int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
+ crypt->ops->extra_msdu_postfix_len;
+ struct sk_buff *skb_new = dev_alloc_skb(len);
+
+ if (unlikely(!skb_new))
+ goto failed;
+
+ skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
+ memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
+ snapped = 1;
+ ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
+ ether_type);
+ memcpy(skb_put(skb_new, skb->len), skb->data, skb->len);
+ res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
+ if (res < 0) {
+ IEEE80211_ERROR("msdu encryption failed\n");
+ dev_kfree_skb_any(skb_new);
+ goto failed;
+ }
+ dev_kfree_skb_any(skb);
+ skb = skb_new;
+ bytes += crypt->ops->extra_msdu_prefix_len +
+ crypt->ops->extra_msdu_postfix_len;
+ skb_pull(skb, hdr_len);
+ }
- /* Determine amount of payload per fragment. Regardless of if
- * this stack is providing the full 802.11 header, one will
- * eventually be affixed to this fragment -- so we must account for
- * it when determining the amount of payload space. */
- bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
- if (ieee->config &
- (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
- bytes_per_frag -= IEEE80211_FCS_LEN;
-
- /* Each fragment may need to have room for encryptiong pre/postfix */
- if (encrypt)
- bytes_per_frag -= crypt->ops->extra_prefix_len +
- crypt->ops->extra_postfix_len;
-
- /* Number of fragments is the total bytes_per_frag /
- * payload_per_fragment */
- nr_frags = bytes / bytes_per_frag;
- bytes_last_frag = bytes % bytes_per_frag;
- if (bytes_last_frag)
+ if (host_encrypt || ieee->host_open_frag) {
+ /* Determine fragmentation size based on destination (multicast
+ * and broadcast are not fragmented) */
+ if (is_multicast_ether_addr(dest) ||
+ is_broadcast_ether_addr(dest))
+ frag_size = MAX_FRAG_THRESHOLD;
+ else
+ frag_size = ieee->fts;
+
+ /* Determine amount of payload per fragment. Regardless of if
+ * this stack is providing the full 802.11 header, one will
+ * eventually be affixed to this fragment -- so we must account
+ * for it when determining the amount of payload space. */
+ bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
+ if (ieee->config &
+ (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+ bytes_per_frag -= IEEE80211_FCS_LEN;
+
+ /* Each fragment may need to have room for encryption
+ * pre/postfix */
+ if (host_encrypt)
+ bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_mpdu_postfix_len;
+
+ /* Number of fragments is the total
+ * bytes_per_frag / payload_per_fragment */
+ nr_frags = bytes / bytes_per_frag;
+ bytes_last_frag = bytes % bytes_per_frag;
+ if (bytes_last_frag)
+ nr_frags++;
+ else
+ bytes_last_frag = bytes_per_frag;
+ } else {
+ nr_frags = 1;
+ bytes_per_frag = bytes_last_frag = bytes;
+ frag_size = bytes + IEEE80211_3ADDR_LEN;
+ }
+
+ rts_required = (frag_size > ieee->rts
+ && ieee->config & CFG_IEEE80211_RTS);
+ if (rts_required)
nr_frags++;
- else
- bytes_last_frag = bytes_per_frag;
/* When we allocate the TXB we allocate enough space for the reserve
* and full fragment bytes (bytes_per_frag doesn't include prefix,
* postfix, header, FCS, etc.) */
- txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
+ txb = ieee80211_alloc_txb(nr_frags, frag_size,
+ ieee->tx_headroom, GFP_ATOMIC);
if (unlikely(!txb)) {
printk(KERN_WARNING "%s: Could not allocate TXB\n",
ieee->dev->name);
goto failed;
}
txb->encrypted = encrypt;
- txb->payload_size = bytes;
+ if (host_encrypt)
+ txb->payload_size = frag_size * (nr_frags - 1) +
+ bytes_last_frag;
+ else
+ txb->payload_size = bytes;
+
+ if (rts_required) {
+ skb_frag = txb->fragments[0];
+ frag_hdr =
+ (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
+
+ /*
+ * Set header frame_ctl to the RTS.
+ */
+ header.frame_ctl =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+ memcpy(frag_hdr, &header, hdr_len);
- for (i = 0; i < nr_frags; i++) {
+ /*
+ * Restore header frame_ctl to the original data setting.
+ */
+ header.frame_ctl = cpu_to_le16(fc);
+
+ if (ieee->config &
+ (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+ skb_put(skb_frag, 4);
+
+ txb->rts_included = 1;
+ i = 1;
+ } else
+ i = 0;
+
+ for (; i < nr_frags; i++) {
skb_frag = txb->fragments[i];
- if (encrypt)
- skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
+ if (host_encrypt || host_build_iv)
+ skb_reserve(skb_frag,
+ crypt->ops->extra_mpdu_prefix_len);
- frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len);
+ frag_hdr =
+ (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len);
memcpy(frag_hdr, &header, hdr_len);
/* If this is not the last fragment, then add the MOREFRAGS
@@ -379,11 +448,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
bytes = bytes_last_frag;
}
- /* Put a SNAP header on the first fragment */
- if (i == 0) {
- ieee80211_put_snap(skb_put
- (skb_frag, SNAP_SIZE + sizeof(u16)),
- ether_type);
+ if (i == 0 && !snapped) {
+ ieee80211_copy_snap(skb_put
+ (skb_frag, SNAP_SIZE + sizeof(u16)),
+ ether_type);
bytes -= SNAP_SIZE + sizeof(u16);
}
@@ -394,8 +462,19 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
/* Encryption routine will move the header forward in order
* to insert the IV between the header and the payload */
- if (encrypt)
+ if (host_encrypt)
ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
+ else if (host_build_iv) {
+ struct ieee80211_crypt_data *crypt;
+
+ crypt = ieee->crypt[ieee->tx_keyidx];
+ atomic_inc(&crypt->refcnt);
+ if (crypt->ops->build_iv)
+ crypt->ops->build_iv(skb_frag, hdr_len,
+ crypt->priv);
+ atomic_dec(&crypt->refcnt);
+ }
+
if (ieee->config &
(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
skb_put(skb_frag, 4);
@@ -407,11 +486,20 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
if (txb) {
- if ((*ieee->hard_start_xmit) (txb, dev) == 0) {
+ int ret = (*ieee->hard_start_xmit) (txb, dev, priority);
+ if (ret == 0) {
stats->tx_packets++;
stats->tx_bytes += txb->payload_size;
return 0;
}
+
+ if (ret == NETDEV_TX_BUSY) {
+ printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; "
+ "driver should report queue full via "
+ "ieee80211_device->is_queue_full.\n",
+ ieee->dev->name);
+ }
+
ieee80211_txb_free(txb);
}
@@ -422,7 +510,72 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
stats->tx_errors++;
return 1;
+}
+
+/* Incoming 802.11 structure is converted to a TXB which consists of
+ * a block of 802.11 fragment packets (stored as skbs) */
+int ieee80211_tx_frame(struct ieee80211_device *ieee,
+ struct ieee80211_hdr *frame, int len)
+{
+ struct ieee80211_txb *txb = NULL;
+ unsigned long flags;
+ struct net_device_stats *stats = &ieee->stats;
+ struct sk_buff *skb_frag;
+ int priority = -1;
+
+ spin_lock_irqsave(&ieee->lock, flags);
+ /* If there is no driver handler to take the TXB, don't bother
+ * creating it... */
+ if (!ieee->hard_start_xmit) {
+ printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
+ goto success;
+ }
+
+ if (unlikely(len < 24)) {
+ printk(KERN_WARNING "%s: skb too small (%d).\n",
+ ieee->dev->name, len);
+ goto success;
+ }
+
+ /* When we allocate the TXB we allocate enough space for the reserve
+ * and full fragment bytes (bytes_per_frag doesn't include prefix,
+ * postfix, header, FCS, etc.) */
+ txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC);
+ if (unlikely(!txb)) {
+ printk(KERN_WARNING "%s: Could not allocate TXB\n",
+ ieee->dev->name);
+ goto failed;
+ }
+ txb->encrypted = 0;
+ txb->payload_size = len;
+
+ skb_frag = txb->fragments[0];
+
+ memcpy(skb_put(skb_frag, len), frame, len);
+
+ if (ieee->config &
+ (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+ skb_put(skb_frag, 4);
+
+ success:
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+ if (txb) {
+ if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) {
+ stats->tx_packets++;
+ stats->tx_bytes += txb->payload_size;
+ return 0;
+ }
+ ieee80211_txb_free(txb);
+ }
+ return 0;
+
+ failed:
+ spin_unlock_irqrestore(&ieee->lock, flags);
+ stats->tx_errors++;
+ return 1;
}
+EXPORT_SYMBOL(ieee80211_tx_frame);
EXPORT_SYMBOL(ieee80211_txb_free);
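
The reworked ieee80211_xmit() above derives the fragment count from an integer division plus a remainder check, and adds one extra slot when an RTS frame is required. The same arithmetic as a standalone program, with made-up sizes standing in for real frame and fragmentation-threshold values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical figures: 1400 payload bytes and 256 bytes of payload
	 * per fragment once header/FCS/crypto overhead has been subtracted. */
	int bytes = 1400, bytes_per_frag = 256;
	int nr_frags = bytes / bytes_per_frag;
	int bytes_last_frag = bytes % bytes_per_frag;

	if (bytes_last_frag)
		nr_frags++;			/* partial trailing fragment */
	else
		bytes_last_frag = bytes_per_frag;

	printf("%d fragments, last carries %d bytes\n",
	       nr_frags, bytes_last_frag);	/* 6 fragments, last 120 bytes */
	return 0;
}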
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 94882f39b07..1ce7af9bec3 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright(c) 2004 Intel Corporation. All rights reserved.
+ Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
Portions of this file are based on the WEP enablement code provided by the
Host AP project hostap-drivers v0.1.3
@@ -32,6 +32,7 @@
#include <linux/kmod.h>
#include <linux/module.h>
+#include <linux/jiffies.h>
#include <net/ieee80211.h>
#include <linux/wireless.h>
@@ -140,18 +141,41 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
start = iwe_stream_add_point(start, stop, &iwe, custom);
/* Add quality statistics */
- /* TODO: Fix these values... */
iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = network->stats.signal;
- iwe.u.qual.level = network->stats.rssi;
- iwe.u.qual.noise = network->stats.noise;
- iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
- if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
- iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
- if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
+ iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
+ IW_QUAL_NOISE_UPDATED;
+
+ if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) {
+ iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID |
+ IW_QUAL_LEVEL_INVALID;
+ iwe.u.qual.qual = 0;
+ iwe.u.qual.level = 0;
+ } else {
+ iwe.u.qual.level = network->stats.rssi;
+ if (ieee->perfect_rssi == ieee->worst_rssi)
+ iwe.u.qual.qual = 100;
+ else
+ iwe.u.qual.qual =
+ (100 *
+ (ieee->perfect_rssi - ieee->worst_rssi) *
+ (ieee->perfect_rssi - ieee->worst_rssi) -
+ (ieee->perfect_rssi - network->stats.rssi) *
+ (15 * (ieee->perfect_rssi - ieee->worst_rssi) +
+ 62 * (ieee->perfect_rssi - network->stats.rssi))) /
+ ((ieee->perfect_rssi - ieee->worst_rssi) *
+ (ieee->perfect_rssi - ieee->worst_rssi));
+ if (iwe.u.qual.qual > 100)
+ iwe.u.qual.qual = 100;
+ else if (iwe.u.qual.qual < 1)
+ iwe.u.qual.qual = 0;
+ }
+
+ if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) {
iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
- if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
- iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
+ iwe.u.qual.noise = 0;
+ } else {
+ iwe.u.qual.noise = network->stats.noise;
+ }
start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN);
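
The new quality computation above maps the reported RSSI onto a 0-100 scale between worst_rssi and perfect_rssi; the 15 and 62 weights are taken directly from the hunk. The same expression as a standalone function, fed hypothetical RSSI values and clamped exactly as the code above does:

#include <stdio.h>

/* p = perfect_rssi, w = worst_rssi, r = measured rssi (values made up). */
static int rssi_to_qual(int p, int w, int r)
{
	int qual;

	if (p == w)
		return 100;
	qual = (100 * (p - w) * (p - w) -
		(p - r) * (15 * (p - w) + 62 * (p - r))) /
	       ((p - w) * (p - w));
	if (qual > 100)
		qual = 100;
	else if (qual < 1)
		qual = 0;
	return qual;
}

int main(void)
{
	printf("quality: %d%%\n", rssi_to_qual(-20, -85, -55));	/* 73% */
	return 0;
}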
@@ -162,7 +186,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
if (iwe.u.data.length)
start = iwe_stream_add_point(start, stop, &iwe, custom);
- if (ieee->wpa_enabled && network->wpa_ie_len) {
+ if (network->wpa_ie_len) {
char buf[MAX_WPA_IE_LEN * 2 + 30];
u8 *p = buf;
@@ -177,7 +201,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
start = iwe_stream_add_point(start, stop, &iwe, buf);
}
- if (ieee->wpa_enabled && network->rsn_ie_len) {
+ if (network->rsn_ie_len) {
char buf[MAX_WPA_IE_LEN * 2 + 30];
u8 *p = buf;
@@ -197,8 +221,8 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
iwe.cmd = IWEVCUSTOM;
p = custom;
p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
- " Last beacon: %lums ago",
- (jiffies - network->last_scanned) / (HZ / 100));
+ " Last beacon: %dms ago",
+ jiffies_to_msecs(jiffies - network->last_scanned));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
start = iwe_stream_add_point(start, stop, &iwe, custom);
@@ -228,13 +252,13 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
ev = ipw2100_translate_scan(ieee, ev, stop, network);
else
IEEE80211_DEBUG_SCAN("Not showing network '%s ("
- MAC_FMT ")' due to age (%lums).\n",
+ MAC_FMT ")' due to age (%dms).\n",
escape_essid(network->ssid,
network->ssid_len),
MAC_ARG(network->bssid),
- (jiffies -
- network->last_scanned) / (HZ /
- 100));
+ jiffies_to_msecs(jiffies -
+ network->
+ last_scanned));
}
spin_unlock_irqrestore(&ieee->lock, flags);
@@ -258,6 +282,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
};
int i, key, key_provided, len;
struct ieee80211_crypt_data **crypt;
+ int host_crypto = ieee->host_encrypt || ieee->host_decrypt;
IEEE80211_DEBUG_WX("SET_ENCODE\n");
@@ -298,15 +323,17 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
if (i == WEP_KEYS) {
sec.enabled = 0;
+ sec.encrypt = 0;
sec.level = SEC_LEVEL_0;
- sec.flags |= SEC_ENABLED | SEC_LEVEL;
+ sec.flags |= SEC_ENABLED | SEC_LEVEL | SEC_ENCRYPT;
}
goto done;
}
sec.enabled = 1;
- sec.flags |= SEC_ENABLED;
+ sec.encrypt = 1;
+ sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
if (*crypt != NULL && (*crypt)->ops != NULL &&
strcmp((*crypt)->ops->name, "WEP") != 0) {
@@ -315,7 +342,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
ieee80211_crypt_delayed_deinit(ieee, crypt);
}
- if (*crypt == NULL) {
+ if (*crypt == NULL && host_crypto) {
struct ieee80211_crypt_data *new_crypt;
/* take WEP into use */
@@ -355,49 +382,56 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
key, escape_essid(sec.keys[key], len),
erq->length, len);
sec.key_sizes[key] = len;
- (*crypt)->ops->set_key(sec.keys[key], len, NULL,
- (*crypt)->priv);
+ if (*crypt)
+ (*crypt)->ops->set_key(sec.keys[key], len, NULL,
+ (*crypt)->priv);
sec.flags |= (1 << key);
/* This ensures a key will be activated if no key is
* explicitely set */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
+
} else {
- len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
- NULL, (*crypt)->priv);
- if (len == 0) {
- /* Set a default key of all 0 */
- IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
- key);
- memset(sec.keys[key], 0, 13);
- (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
- (*crypt)->priv);
- sec.key_sizes[key] = 13;
- sec.flags |= (1 << key);
+ if (host_crypto) {
+ len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
+ NULL, (*crypt)->priv);
+ if (len == 0) {
+ /* Set a default key of all 0 */
+ IEEE80211_DEBUG_WX("Setting key %d to all "
+ "zero.\n", key);
+ memset(sec.keys[key], 0, 13);
+ (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
+ (*crypt)->priv);
+ sec.key_sizes[key] = 13;
+ sec.flags |= (1 << key);
+ }
}
-
/* No key data - just set the default TX key index */
if (key_provided) {
- IEEE80211_DEBUG_WX
- ("Setting key %d to default Tx key.\n", key);
+ IEEE80211_DEBUG_WX("Setting key %d to default Tx "
+ "key.\n", key);
ieee->tx_keyidx = key;
sec.active_key = key;
sec.flags |= SEC_ACTIVE_KEY;
}
}
-
- done:
- ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
- sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
- sec.flags |= SEC_AUTH_MODE;
- IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
- "OPEN" : "SHARED KEY");
+ if (erq->flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED)) {
+ ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
+ sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN :
+ WLAN_AUTH_SHARED_KEY;
+ sec.flags |= SEC_AUTH_MODE;
+ IEEE80211_DEBUG_WX("Auth: %s\n",
+ sec.auth_mode == WLAN_AUTH_OPEN ?
+ "OPEN" : "SHARED KEY");
+ }
/* For now we just support WEP, so only set that security level...
* TODO: When WPA is added this is one place that needs to change */
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
+ sec.encode_alg[key] = SEC_ALG_WEP;
+ done:
if (ieee->set_security)
ieee->set_security(dev, &sec);
@@ -422,6 +456,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
struct iw_point *erq = &(wrqu->encoding);
int len, key;
struct ieee80211_crypt_data *crypt;
+ struct ieee80211_security *sec = &ieee->sec;
IEEE80211_DEBUG_WX("GET_ENCODE\n");
@@ -436,23 +471,16 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
crypt = ieee->crypt[key];
erq->flags = key + 1;
- if (crypt == NULL || crypt->ops == NULL) {
+ if (!sec->enabled) {
erq->length = 0;
erq->flags |= IW_ENCODE_DISABLED;
return 0;
}
- if (strcmp(crypt->ops->name, "WEP") != 0) {
- /* only WEP is supported with wireless extensions, so just
- * report that encryption is used */
- erq->length = 0;
- erq->flags |= IW_ENCODE_ENABLED;
- return 0;
- }
+ len = sec->key_sizes[key];
+ memcpy(keybuf, sec->keys[key], len);
- len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv);
erq->length = (len >= 0 ? len : 0);
-
erq->flags |= IW_ENCODE_ENABLED;
if (ieee->open_wep)
@@ -463,6 +491,240 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
return 0;
}
+int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct net_device *dev = ieee->dev;
+ struct iw_point *encoding = &wrqu->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ int i, idx, ret = 0;
+ int group_key = 0;
+ const char *alg, *module;
+ struct ieee80211_crypto_ops *ops;
+ struct ieee80211_crypt_data **crypt;
+
+ struct ieee80211_security sec = {
+ .flags = 0,
+ };
+
+ idx = encoding->flags & IW_ENCODE_INDEX;
+ if (idx) {
+ if (idx < 1 || idx > WEP_KEYS)
+ return -EINVAL;
+ idx--;
+ } else
+ idx = ieee->tx_keyidx;
+
+ if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+ crypt = &ieee->crypt[idx];
+ group_key = 1;
+ } else {
+ if (idx != 0)
+ return -EINVAL;
+ if (ieee->iw_mode == IW_MODE_INFRA)
+ crypt = &ieee->crypt[idx];
+ else
+ return -EINVAL;
+ }
+
+ sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
+ if ((encoding->flags & IW_ENCODE_DISABLED) ||
+ ext->alg == IW_ENCODE_ALG_NONE) {
+ if (*crypt)
+ ieee80211_crypt_delayed_deinit(ieee, crypt);
+
+ for (i = 0; i < WEP_KEYS; i++)
+ if (ieee->crypt[i] != NULL)
+ break;
+
+ if (i == WEP_KEYS) {
+ sec.enabled = 0;
+ sec.encrypt = 0;
+ sec.level = SEC_LEVEL_0;
+ sec.flags |= SEC_LEVEL;
+ }
+ goto done;
+ }
+
+ sec.enabled = 1;
+ sec.encrypt = 1;
+
+ if (group_key ? !ieee->host_mc_decrypt :
+ !(ieee->host_encrypt || ieee->host_decrypt ||
+ ieee->host_encrypt_msdu))
+ goto skip_host_crypt;
+
+ switch (ext->alg) {
+ case IW_ENCODE_ALG_WEP:
+ alg = "WEP";
+ module = "ieee80211_crypt_wep";
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ alg = "TKIP";
+ module = "ieee80211_crypt_tkip";
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ alg = "CCMP";
+ module = "ieee80211_crypt_ccmp";
+ break;
+ default:
+ IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
+ dev->name, ext->alg);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ops = ieee80211_get_crypto_ops(alg);
+ if (ops == NULL) {
+ request_module(module);
+ ops = ieee80211_get_crypto_ops(alg);
+ }
+ if (ops == NULL) {
+ IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
+ dev->name, ext->alg);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (*crypt == NULL || (*crypt)->ops != ops) {
+ struct ieee80211_crypt_data *new_crypt;
+
+ ieee80211_crypt_delayed_deinit(ieee, crypt);
+
+ new_crypt = (struct ieee80211_crypt_data *)
+ kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+ if (new_crypt == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
+ new_crypt->ops = ops;
+ if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
+ new_crypt->priv = new_crypt->ops->init(idx);
+ if (new_crypt->priv == NULL) {
+ kfree(new_crypt);
+ ret = -EINVAL;
+ goto done;
+ }
+ *crypt = new_crypt;
+ }
+
+ if (ext->key_len > 0 && (*crypt)->ops->set_key &&
+ (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
+ (*crypt)->priv) < 0) {
+ IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ skip_host_crypt:
+ if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ ieee->tx_keyidx = idx;
+ sec.active_key = idx;
+ sec.flags |= SEC_ACTIVE_KEY;
+ }
+
+ if (ext->alg != IW_ENCODE_ALG_NONE) {
+ memcpy(sec.keys[idx], ext->key, ext->key_len);
+ sec.key_sizes[idx] = ext->key_len;
+ sec.flags |= (1 << idx);
+ if (ext->alg == IW_ENCODE_ALG_WEP) {
+ sec.encode_alg[idx] = SEC_ALG_WEP;
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_1;
+ } else if (ext->alg == IW_ENCODE_ALG_TKIP) {
+ sec.encode_alg[idx] = SEC_ALG_TKIP;
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_2;
+ } else if (ext->alg == IW_ENCODE_ALG_CCMP) {
+ sec.encode_alg[idx] = SEC_ALG_CCMP;
+ sec.flags |= SEC_LEVEL;
+ sec.level = SEC_LEVEL_3;
+ }
+ /* Don't set sec level for group keys. */
+ if (group_key)
+ sec.flags &= ~SEC_LEVEL;
+ }
+ done:
+ if (ieee->set_security)
+ ieee->set_security(ieee->dev, &sec);
+
+ /*
+ * Do not reset port if card is in Managed mode since resetting will
+ * generate new IEEE 802.11 authentication which may end up looping
+ * with IEEE 802.1X. If your hardware requires a reset after WEP
+ * configuration (for example... Prism2), implement the reset_port in
+ * the callbacks structures used to initialize the 802.11 stack.
+ */
+ if (ieee->reset_on_keychange &&
+ ieee->iw_mode != IW_MODE_INFRA &&
+ ieee->reset_port && ieee->reset_port(dev)) {
+ IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
+ return -EINVAL;
+ }
+
+ return ret;
+}
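
Both encodeext handlers added here decode the wireless-extensions key slot the same way: the low bits of encoding->flags carry a 1-based index, and zero means "use the current TX key". A minimal sketch of that decoding; the TOY_* constants are stand-ins for the wireless.h definitions, not the real names:

#include <stdio.h>

#define TOY_WEP_KEYS		4
#define TOY_IW_ENCODE_INDEX	0x00FF	/* low bits carry the key index */

/* Returns the 0-based key slot, or -1 for an out-of-range index. */
static int decode_idx(int flags, int tx_keyidx)
{
	int idx = flags & TOY_IW_ENCODE_INDEX;

	if (idx) {
		if (idx < 1 || idx > TOY_WEP_KEYS)
			return -1;
		return idx - 1;		/* wire index is 1-based */
	}
	return tx_keyidx;		/* 0 means "current TX key" */
}

int main(void)
{
	printf("%d %d %d\n", decode_idx(0, 2), decode_idx(1, 2),
	       decode_idx(9, 2));	/* prints: 2 0 -1 */
	return 0;
}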
+
+int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct iw_point *encoding = &wrqu->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ struct ieee80211_security *sec = &ieee->sec;
+ int idx, max_key_len;
+
+ max_key_len = encoding->length - sizeof(*ext);
+ if (max_key_len < 0)
+ return -EINVAL;
+
+ idx = encoding->flags & IW_ENCODE_INDEX;
+ if (idx) {
+ if (idx < 1 || idx > WEP_KEYS)
+ return -EINVAL;
+ idx--;
+ } else
+ idx = ieee->tx_keyidx;
+
+ if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY))
+ if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA)
+ return -EINVAL;
+
+ encoding->flags = idx + 1;
+ memset(ext, 0, sizeof(*ext));
+
+ if (!sec->enabled) {
+ ext->alg = IW_ENCODE_ALG_NONE;
+ ext->key_len = 0;
+ encoding->flags |= IW_ENCODE_DISABLED;
+ } else {
+ if (sec->encode_alg[idx] == SEC_ALG_WEP)
+ ext->alg = IW_ENCODE_ALG_WEP;
+ else if (sec->encode_alg[idx] == SEC_ALG_TKIP)
+ ext->alg = IW_ENCODE_ALG_TKIP;
+ else if (sec->encode_alg[idx] == SEC_ALG_CCMP)
+ ext->alg = IW_ENCODE_ALG_CCMP;
+ else
+ return -EINVAL;
+
+ ext->key_len = sec->key_sizes[idx];
+ memcpy(ext->key, sec->keys[idx], ext->key_len);
+ encoding->flags |= IW_ENCODE_ENABLED;
+ if (ext->key_len &&
+ (ext->alg == IW_ENCODE_ALG_TKIP ||
+ ext->alg == IW_ENCODE_ALG_CCMP))
+ ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
+
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(ieee80211_wx_set_encodeext);
+EXPORT_SYMBOL(ieee80211_wx_get_encodeext);
+
EXPORT_SYMBOL(ieee80211_wx_get_scan);
EXPORT_SYMBOL(ieee80211_wx_set_encode);
EXPORT_SYMBOL(ieee80211_wx_get_encode);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 74f2207e131..4ec4b2ca6ab 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -715,6 +715,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
break;
ret = 0;
if (ifa->ifa_mask != sin->sin_addr.s_addr) {
+ u32 old_mask = ifa->ifa_mask;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_mask = sin->sin_addr.s_addr;
ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
@@ -728,7 +729,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
if ((dev->flags & IFF_BROADCAST) &&
(ifa->ifa_prefixlen < 31) &&
(ifa->ifa_broadcast ==
- (ifa->ifa_local|~ifa->ifa_mask))) {
+ (ifa->ifa_local|~old_mask))) {
ifa->ifa_broadcast = (ifa->ifa_local |
~sin->sin_addr.s_addr);
}
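
The devinet.c change above only recomputes ifa_broadcast when the current value was derived from the old netmask, which is why the old mask has to be saved before it is overwritten. A quick standalone check of that address arithmetic with invented values; the IFF_BROADCAST and prefix-length guards from the kernel code are omitted here:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t local = 0xc0a8010a;	/* 192.168.1.10, hypothetical */
	uint32_t old_mask = 0xffffff00;	/* /24 */
	uint32_t new_mask = 0xffff0000;	/* /16 */
	uint32_t brd = local | ~old_mask;	/* 192.168.1.255 */

	/* Test against the broadcast implied by the *old* mask; only then
	 * is the broadcast rewritten for the new mask. */
	if (brd == (local | ~old_mask))
		brd = local | ~new_mask;	/* 192.168.255.255 */

	printf("new broadcast: 0x%08x\n", (unsigned)brd);	/* 0xc0a8ffff */
	return 0;
}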
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 0093ea08c7f..66247f38b37 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2404,7 +2404,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
prefix = htonl(l->key);
list_for_each_entry_rcu(fa, &li->falh, fa_list) {
- const struct fib_info *fi = rcu_dereference(fa->fa_info);
+ const struct fib_info *fi = fa->fa_info;
unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
if (fa->fa_type == RTN_BROADCAST
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 90dca711ac9..175e093ec56 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1108,12 +1108,9 @@ void __init icmp_init(struct net_proto_family *ops)
struct inet_sock *inet;
int i;
- for (i = 0; i < NR_CPUS; i++) {
+ for_each_cpu(i) {
int err;
- if (!cpu_possible(i))
- continue;
-
err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
&per_cpu(__icmp_socket, i));
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3f1a263e124..87e350069ab 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -391,6 +391,9 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->nfct = from->nfct;
nf_conntrack_get(to->nfct);
to->nfctinfo = from->nfctinfo;
+#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+ to->ipvs_property = from->ipvs_property;
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(to->nf_bridge);
to->nf_bridge = from->nf_bridge;
@@ -1020,10 +1023,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
int alloclen;
skb_prev = skb;
- if (skb_prev)
- fraggap = skb_prev->len - maxfraglen;
- else
- fraggap = 0;
+ fraggap = skb_prev->len - maxfraglen;
alloclen = fragheaderlen + hh_len + fraggap + 15;
skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 07a80b56e8d..422ab68ee7f 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -50,7 +50,7 @@
#include <linux/netfilter_ipv4/ip_conntrack_core.h>
#include <linux/netfilter_ipv4/listhelp.h>
-#define IP_CONNTRACK_VERSION "2.3"
+#define IP_CONNTRACK_VERSION "2.4"
#if 0
#define DEBUGP printk
@@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
static int ip_conntrack_hash_rnd_initted;
static unsigned int ip_conntrack_hash_rnd;
-static u_int32_t
-hash_conntrack(const struct ip_conntrack_tuple *tuple)
+static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
+ unsigned int size, unsigned int rnd)
{
-#if 0
- dump_tuple(tuple);
-#endif
return (jhash_3words(tuple->src.ip,
(tuple->dst.ip ^ tuple->dst.protonum),
(tuple->src.u.all | (tuple->dst.u.all << 16)),
- ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
+ rnd) % size);
+}
+
+static u_int32_t
+hash_conntrack(const struct ip_conntrack_tuple *tuple)
+{
+ return __hash_conntrack(tuple, ip_conntrack_htable_size,
+ ip_conntrack_hash_rnd);
}
int
@@ -1341,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data)
return 1;
}
-static void free_conntrack_hash(void)
+static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
{
- if (ip_conntrack_vmalloc)
- vfree(ip_conntrack_hash);
+ if (vmalloced)
+ vfree(hash);
else
- free_pages((unsigned long)ip_conntrack_hash,
- get_order(sizeof(struct list_head)
- * ip_conntrack_htable_size));
+ free_pages((unsigned long)hash,
+ get_order(sizeof(struct list_head) * size));
}
void ip_conntrack_flush()
@@ -1378,12 +1381,83 @@ void ip_conntrack_cleanup(void)
ip_conntrack_flush();
kmem_cache_destroy(ip_conntrack_cachep);
kmem_cache_destroy(ip_conntrack_expect_cachep);
- free_conntrack_hash();
+ free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
+ ip_conntrack_htable_size);
nf_unregister_sockopt(&so_getorigdst);
}
-static int hashsize;
-module_param(hashsize, int, 0400);
+static struct list_head *alloc_hashtable(int size, int *vmalloced)
+{
+ struct list_head *hash;
+ unsigned int i;
+
+ *vmalloced = 0;
+ hash = (void*)__get_free_pages(GFP_KERNEL,
+ get_order(sizeof(struct list_head)
+ * size));
+ if (!hash) {
+ *vmalloced = 1;
+ printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
+ hash = vmalloc(sizeof(struct list_head) * size);
+ }
+
+ if (hash)
+ for (i = 0; i < size; i++)
+ INIT_LIST_HEAD(&hash[i]);
+
+ return hash;
+}
+
+int set_hashsize(const char *val, struct kernel_param *kp)
+{
+ int i, bucket, hashsize, vmalloced;
+ int old_vmalloced, old_size;
+ int rnd;
+ struct list_head *hash, *old_hash;
+ struct ip_conntrack_tuple_hash *h;
+
+ /* On boot, we can set this without any fancy locking. */
+ if (!ip_conntrack_htable_size)
+ return param_set_int(val, kp);
+
+ hashsize = simple_strtol(val, NULL, 0);
+ if (!hashsize)
+ return -EINVAL;
+
+ hash = alloc_hashtable(hashsize, &vmalloced);
+ if (!hash)
+ return -ENOMEM;
+
+ /* We have to rehash for the new table anyway, so we can also
+ * use a new random seed */
+ get_random_bytes(&rnd, 4);
+
+ write_lock_bh(&ip_conntrack_lock);
+ for (i = 0; i < ip_conntrack_htable_size; i++) {
+ while (!list_empty(&ip_conntrack_hash[i])) {
+ h = list_entry(ip_conntrack_hash[i].next,
+ struct ip_conntrack_tuple_hash, list);
+ list_del(&h->list);
+ bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+ list_add_tail(&h->list, &hash[bucket]);
+ }
+ }
+ old_size = ip_conntrack_htable_size;
+ old_vmalloced = ip_conntrack_vmalloc;
+ old_hash = ip_conntrack_hash;
+
+ ip_conntrack_htable_size = hashsize;
+ ip_conntrack_vmalloc = vmalloced;
+ ip_conntrack_hash = hash;
+ ip_conntrack_hash_rnd = rnd;
+ write_unlock_bh(&ip_conntrack_lock);
+
+ free_conntrack_hash(old_hash, old_vmalloced, old_size);
+ return 0;
+}
+
+module_param_call(hashsize, set_hashsize, param_get_uint,
+ &ip_conntrack_htable_size, 0600);
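
set_hashsize() above resizes the conntrack table at runtime: it allocates a new bucket array, walks every old chain, and re-links each entry into the bucket chosen by the new size and a fresh random seed, all under the write lock. The sketch below shows the same rehash loop in userspace, assuming a toy hash function (the kernel uses jhash_3words on the tuple) and singly linked chains instead of list_head:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned int key;
	struct entry *next;
};

/* Stand-in for __hash_conntrack(): any hash folded modulo the table size. */
static unsigned int bucket_of(unsigned int key, unsigned int size,
			      unsigned int rnd)
{
	return (key ^ rnd) % size;
}

static void rehash(struct entry **old, unsigned int old_size,
		   struct entry **new, unsigned int new_size,
		   unsigned int rnd)
{
	unsigned int i, b;
	struct entry *e;

	for (i = 0; i < old_size; i++) {
		while ((e = old[i]) != NULL) {	/* unlink from the old chain */
			old[i] = e->next;
			b = bucket_of(e->key, new_size, rnd);
			e->next = new[b];	/* relink into the new bucket */
			new[b] = e;
		}
	}
}

int main(void)
{
	struct entry **old = calloc(4, sizeof(*old));
	struct entry **new = calloc(16, sizeof(*new));
	struct entry e = { 12345, NULL };

	old[bucket_of(e.key, 4, 7)] = &e;
	rehash(old, 4, new, 16, 7);
	printf("entry now lives in bucket %u of 16\n",
	       bucket_of(e.key, 16, 7));
	free(old);
	free(new);
	return 0;
}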
int __init ip_conntrack_init(void)
{
@@ -1392,9 +1466,7 @@ int __init ip_conntrack_init(void)
/* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
* machine has 256 buckets. >= 1GB machines have 8192 buckets. */
- if (hashsize) {
- ip_conntrack_htable_size = hashsize;
- } else {
+ if (!ip_conntrack_htable_size) {
ip_conntrack_htable_size
= (((num_physpages << PAGE_SHIFT) / 16384)
/ sizeof(struct list_head));
@@ -1416,20 +1488,8 @@ int __init ip_conntrack_init(void)
return ret;
}
- /* AK: the hash table is twice as big than needed because it
- uses list_head. it would be much nicer to caches to use a
- single pointer list head here. */
- ip_conntrack_vmalloc = 0;
- ip_conntrack_hash
- =(void*)__get_free_pages(GFP_KERNEL,
- get_order(sizeof(struct list_head)
- *ip_conntrack_htable_size));
- if (!ip_conntrack_hash) {
- ip_conntrack_vmalloc = 1;
- printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
- ip_conntrack_hash = vmalloc(sizeof(struct list_head)
- * ip_conntrack_htable_size);
- }
+ ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size,
+ &ip_conntrack_vmalloc);
if (!ip_conntrack_hash) {
printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
goto err_unreg_sockopt;
@@ -1461,9 +1521,6 @@ int __init ip_conntrack_init(void)
ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
write_unlock_bh(&ip_conntrack_lock);
- for (i = 0; i < ip_conntrack_htable_size; i++)
- INIT_LIST_HEAD(&ip_conntrack_hash[i]);
-
/* For use by ipt_REJECT */
ip_ct_attach = ip_conntrack_attach;
@@ -1478,7 +1535,8 @@ int __init ip_conntrack_init(void)
err_free_conntrack_slab:
kmem_cache_destroy(ip_conntrack_cachep);
err_free_hash:
- free_conntrack_hash();
+ free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
+ ip_conntrack_htable_size);
err_unreg_sockopt:
nf_unregister_sockopt(&so_getorigdst);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f7943ba1f43..a65e508fbd4 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -90,9 +90,7 @@ fold_field(void *mib[], int offt)
unsigned long res = 0;
int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
+ for_each_cpu(i) {
res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 677419d0c9a..3e98b57578d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2239,6 +2239,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
/* Note, it is the only place, where
* fast path is recovered for sending TCP.
*/
+ tp->pred_flags = 0;
tcp_fast_path_check(sk, tp);
if (nwin > tp->max_window) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7114031fdc7..b907456a79f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -435,17 +435,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
int nsize, old_factor;
u16 flags;
- if (unlikely(len >= skb->len)) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, "
- "end_seq=%u, skb->len=%u.\n", len, mss_now,
- TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- skb->len);
- WARN_ON(1);
- }
- return 0;
- }
-
+ BUG_ON(len > skb->len);
nsize = skb_headlen(skb) - len;
if (nsize < 0)
nsize = 0;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index b7185fb3377..23e540365a1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -700,10 +700,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
struct sock *sk;
int err, i, j;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
-
+ for_each_cpu(i) {
err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
&per_cpu(__icmpv6_socket, i));
if (err < 0) {
@@ -749,9 +746,7 @@ void icmpv6_cleanup(void)
{
int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
+ for_each_cpu(i) {
sock_release(per_cpu(__icmpv6_socket, i));
}
inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index f841bde30c1..bbbe80cdaf7 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -483,7 +483,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
goto done;
}
fl1 = sfl->fl;
- atomic_inc(&fl->users);
+ atomic_inc(&fl1->users);
break;
}
}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 334a5967831..50a13e75d70 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -140,9 +140,7 @@ fold_field(void *mib[], int offt)
unsigned long res = 0;
int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
+ for_each_cpu(i) {
res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 678c3f2c0d0..5ca283537bc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -740,11 +740,8 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
- struct netlink_sock *nlk;
int len = skb->len;
- nlk = nlk_sk(sk);
-
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, len);
sock_put(sk);
@@ -827,7 +824,7 @@ struct netlink_broadcast_data {
int failure;
int congested;
int delivered;
- unsigned int allocation;
+ gfp_t allocation;
struct sk_buff *skb, *skb2;
};
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index e556d92c0bc..b18fe504301 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -727,7 +727,7 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg)
}
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
- if (rose_route.ndigis > 8) /* No more than 8 digipeats */
+ if (rose_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
err = rose_add_node(&rose_route, dev);
dev_put(dev);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index b74f7772b57..6e4dc28874d 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -69,9 +69,7 @@ fold_field(void *mib[], int nr)
unsigned long res = 0;
int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_possible(i))
- continue;
+ for_each_cpu(i) {
res +=
*((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
sizeof (unsigned long) * nr));
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 46a2ce00a29..cdcab9ca4c6 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_SUNRPC) += sunrpc.o
obj-$(CONFIG_SUNRPC_GSS) += auth_gss/
-sunrpc-y := clnt.o xprt.o sched.o \
+sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
auth.o auth_null.o auth_unix.o \
svc.o svcsock.o svcauth.o svcauth_unix.o \
pmap_clnt.o timer.o xdr.o \
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 505e2d4b3d6..a415d99c394 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/socket.h>
#include <linux/sunrpc/clnt.h>
#include <linux/spinlock.h>
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index fe1b874084b..f3431a7e33d 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
- gss_krb5_seqnum.o
+ gss_krb5_seqnum.o gss_krb5_wrap.o
obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 2f7b867161d..f44f46f1d8e 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -42,9 +42,8 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/socket.h>
-#include <linux/in.h>
#include <linux/sched.h>
+#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
@@ -846,10 +845,8 @@ gss_marshal(struct rpc_task *task, u32 *p)
/* We compute the checksum for the verifier over the xdr-encoded bytes
* starting with the xid and ending at the end of the credential: */
- iov.iov_base = req->rq_snd_buf.head[0].iov_base;
- if (task->tk_client->cl_xprt->stream)
- /* See clnt.c:call_header() */
- iov.iov_base += 4;
+ iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
+ req->rq_snd_buf.head[0].iov_base);
iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
xdr_buf_from_iov(&iov, &verf_buf);
@@ -857,9 +854,7 @@ gss_marshal(struct rpc_task *task, u32 *p)
*p++ = htonl(RPC_AUTH_GSS);
mic.data = (u8 *)(p + 1);
- maj_stat = gss_get_mic(ctx->gc_gss_ctx,
- GSS_C_QOP_DEFAULT,
- &verf_buf, &mic);
+ maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
} else if (maj_stat != 0) {
@@ -890,10 +885,8 @@ static u32 *
gss_validate(struct rpc_task *task, u32 *p)
{
struct rpc_cred *cred = task->tk_msg.rpc_cred;
- struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
- gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
- u32 seq, qop_state;
+ u32 seq;
struct kvec iov;
struct xdr_buf verf_buf;
struct xdr_netobj mic;
@@ -914,23 +907,14 @@ gss_validate(struct rpc_task *task, u32 *p)
mic.data = (u8 *)p;
mic.len = len;
- maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state);
+ maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
if (maj_stat)
goto out_bad;
- switch (gss_cred->gc_service) {
- case RPC_GSS_SVC_NONE:
- /* verifier data, flavor, length: */
- task->tk_auth->au_rslack = XDR_QUADLEN(len) + 2;
- break;
- case RPC_GSS_SVC_INTEGRITY:
- /* verifier data, flavor, length, length, sequence number: */
- task->tk_auth->au_rslack = XDR_QUADLEN(len) + 4;
- break;
- case RPC_GSS_SVC_PRIVACY:
- goto out_bad;
- }
+ /* We leave it to unwrap to calculate au_rslack. For now we just
+ * calculate the length of the verifier: */
+ task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2;
gss_put_ctx(ctx);
dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n",
task->tk_pid);
@@ -975,8 +959,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
p = iov->iov_base + iov->iov_len;
mic.data = (u8 *)(p + 1);
- maj_stat = gss_get_mic(ctx->gc_gss_ctx,
- GSS_C_QOP_DEFAULT, &integ_buf, &mic);
+ maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
status = -EIO; /* XXX? */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
@@ -990,6 +973,113 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
return 0;
}
+static void
+priv_release_snd_buf(struct rpc_rqst *rqstp)
+{
+ int i;
+
+ for (i=0; i < rqstp->rq_enc_pages_num; i++)
+ __free_page(rqstp->rq_enc_pages[i]);
+ kfree(rqstp->rq_enc_pages);
+}
+
+static int
+alloc_enc_pages(struct rpc_rqst *rqstp)
+{
+ struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
+ int first, last, i;
+
+ if (snd_buf->page_len == 0) {
+ rqstp->rq_enc_pages_num = 0;
+ return 0;
+ }
+
+ first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+ last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
+ rqstp->rq_enc_pages_num = last - first + 1 + 1;
+ rqstp->rq_enc_pages
+ = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
+ GFP_NOFS);
+ if (!rqstp->rq_enc_pages)
+ goto out;
+ for (i=0; i < rqstp->rq_enc_pages_num; i++) {
+ rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
+ if (rqstp->rq_enc_pages[i] == NULL)
+ goto out_free;
+ }
+ rqstp->rq_release_snd_buf = priv_release_snd_buf;
+ return 0;
+out_free:
+ for (i--; i >= 0; i--) {
+ __free_page(rqstp->rq_enc_pages[i]);
+ }
+out:
+ return -EAGAIN;
+}
+
+static inline int
+gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj)
+{
+ struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
+ u32 offset;
+ u32 maj_stat;
+ int status;
+ u32 *opaque_len;
+ struct page **inpages;
+ int first;
+ int pad;
+ struct kvec *iov;
+ char *tmp;
+
+ opaque_len = p++;
+ offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
+ *p++ = htonl(rqstp->rq_seqno);
+
+ status = encode(rqstp, p, obj);
+ if (status)
+ return status;
+
+ status = alloc_enc_pages(rqstp);
+ if (status)
+ return status;
+ first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+ inpages = snd_buf->pages + first;
+ snd_buf->pages = rqstp->rq_enc_pages;
+ snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
+ /* Give the tail its own page, in case we need extra space in the
+ * head when wrapping: */
+ if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
+ tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
+ memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
+ snd_buf->tail[0].iov_base = tmp;
+ }
+ maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
+ /* RPC_SLACK_SPACE should prevent this ever happening: */
+ BUG_ON(snd_buf->len > snd_buf->buflen);
+ status = -EIO;
+ /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
+ * done anyway, so it's safe to put the request on the wire: */
+ if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+ cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
+ else if (maj_stat)
+ return status;
+
+ *opaque_len = htonl(snd_buf->len - offset);
+ /* guess whether we're in the head or the tail: */
+ if (snd_buf->page_len || snd_buf->tail[0].iov_len)
+ iov = snd_buf->tail;
+ else
+ iov = snd_buf->head;
+ p = iov->iov_base + iov->iov_len;
+ pad = 3 - ((snd_buf->len - offset - 1) & 3);
+ memset(p, 0, pad);
+ iov->iov_len += pad;
+ snd_buf->len += pad;
+
+ return 0;
+}
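
gss_wrap_req_priv() above rounds the wrapped payload up to the XDR four-byte boundary with pad = 3 - ((len - offset - 1) & 3). A quick standalone check that this bit trick agrees with the conventional (4 - n % 4) % 4 padding rule:

#include <stdio.h>

int main(void)
{
	int n;

	for (n = 1; n <= 8; n++) {
		int pad = 3 - ((n - 1) & 3);	/* expression from the patch */
		int ref = (4 - n % 4) % 4;	/* conventional XDR padding */
		printf("len %d -> pad %d (reference %d)\n", n, pad, ref);
	}
	return 0;
}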
+
static int
gss_wrap_req(struct rpc_task *task,
kxdrproc_t encode, void *rqstp, u32 *p, void *obj)
@@ -1017,6 +1107,8 @@ gss_wrap_req(struct rpc_task *task,
rqstp, p, obj);
break;
case RPC_GSS_SVC_PRIVACY:
+ status = gss_wrap_req_priv(cred, ctx, encode,
+ rqstp, p, obj);
break;
}
out:
@@ -1054,8 +1146,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
return status;
- maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf,
- &mic, NULL);
+ maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
if (maj_stat != GSS_S_COMPLETE)
@@ -1063,6 +1154,35 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
return 0;
}
+static inline int
+gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
+ struct rpc_rqst *rqstp, u32 **p)
+{
+ struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
+ u32 offset;
+ u32 opaque_len;
+ u32 maj_stat;
+ int status = -EIO;
+
+ opaque_len = ntohl(*(*p)++);
+ offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
+ if (offset + opaque_len > rcv_buf->len)
+ return status;
+ /* remove padding: */
+ rcv_buf->len = offset + opaque_len;
+
+ maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
+ if (maj_stat == GSS_S_CONTEXT_EXPIRED)
+ cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
+ if (maj_stat != GSS_S_COMPLETE)
+ return status;
+ if (ntohl(*(*p)++) != rqstp->rq_seqno)
+ return status;
+
+ return 0;
+}
+
+
static int
gss_unwrap_resp(struct rpc_task *task,
kxdrproc_t decode, void *rqstp, u32 *p, void *obj)
@@ -1071,6 +1191,9 @@ gss_unwrap_resp(struct rpc_task *task,
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
+ u32 *savedp = p;
+ struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
+ int savedlen = head->iov_len;
int status = -EIO;
if (ctx->gc_proc != RPC_GSS_PROC_DATA)
@@ -1084,8 +1207,14 @@ gss_unwrap_resp(struct rpc_task *task,
goto out;
break;
case RPC_GSS_SVC_PRIVACY:
+ status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
+ if (status)
+ goto out;
break;
}
+ /* take into account extra slack for integrity and privacy cases: */
+ task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp)
+ + (savedlen - head->iov_len);
out_decode:
status = decode(rqstp, p, obj);
out:
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index ee6ae74cd1b..3f3d5437f02 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -139,17 +139,91 @@ buf_to_sg(struct scatterlist *sg, char *ptr, int len) {
sg->length = len;
}
+static int
+process_xdr_buf(struct xdr_buf *buf, int offset, int len,
+ int (*actor)(struct scatterlist *, void *), void *data)
+{
+ int i, page_len, thislen, page_offset, ret = 0;
+ struct scatterlist sg[1];
+
+ if (offset >= buf->head[0].iov_len) {
+ offset -= buf->head[0].iov_len;
+ } else {
+ thislen = buf->head[0].iov_len - offset;
+ if (thislen > len)
+ thislen = len;
+ buf_to_sg(sg, buf->head[0].iov_base + offset, thislen);
+ ret = actor(sg, data);
+ if (ret)
+ goto out;
+ offset = 0;
+ len -= thislen;
+ }
+ if (len == 0)
+ goto out;
+
+ if (offset >= buf->page_len) {
+ offset -= buf->page_len;
+ } else {
+ page_len = buf->page_len - offset;
+ if (page_len > len)
+ page_len = len;
+ len -= page_len;
+ page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
+ i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
+ thislen = PAGE_CACHE_SIZE - page_offset;
+ do {
+ if (thislen > page_len)
+ thislen = page_len;
+ sg->page = buf->pages[i];
+ sg->offset = page_offset;
+ sg->length = thislen;
+ ret = actor(sg, data);
+ if (ret)
+ goto out;
+ page_len -= thislen;
+ i++;
+ page_offset = 0;
+ thislen = PAGE_CACHE_SIZE;
+ } while (page_len != 0);
+ offset = 0;
+ }
+ if (len == 0)
+ goto out;
+
+ if (offset < buf->tail[0].iov_len) {
+ thislen = buf->tail[0].iov_len - offset;
+ if (thislen > len)
+ thislen = len;
+ buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen);
+ ret = actor(sg, data);
+ len -= thislen;
+ }
+ if (len != 0)
+ ret = -EINVAL;
+out:
+ return ret;
+}
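
process_xdr_buf() above walks an arbitrary [offset, offset + len) range of an xdr_buf and calls an actor once per contiguous piece of the head, the page array and the tail. The userspace sketch below captures the same walking pattern over a simplified three-region buffer; it skips the per-page splitting that the kernel version also has to perform:

#include <stdio.h>

/* Simplified xdr_buf: three flat regions instead of head/pages/tail. */
struct toybuf {
	const char *head; size_t head_len;
	const char *mid;  size_t mid_len;
	const char *tail; size_t tail_len;
};

typedef int (*actor_t)(const char *p, size_t len, void *data);

/* Walk [offset, offset + len) across the regions, calling the actor once
 * per contiguous piece, in the same shape as process_xdr_buf(). */
static int walk(const struct toybuf *b, size_t offset, size_t len,
		actor_t actor, void *data)
{
	const struct { const char *p; size_t n; } seg[3] = {
		{ b->head, b->head_len },
		{ b->mid,  b->mid_len  },
		{ b->tail, b->tail_len },
	};
	int i, ret;

	for (i = 0; i < 3 && len; i++) {
		size_t n = seg[i].n;

		if (offset >= n) {		/* region entirely skipped */
			offset -= n;
			continue;
		}
		n -= offset;
		if (n > len)
			n = len;
		ret = actor(seg[i].p + offset, n, data);
		if (ret)
			return ret;
		len -= n;
		offset = 0;
	}
	return len ? -1 : 0;	/* range ran past the end of the buffer */
}

static int count(const char *p, size_t len, void *data)
{
	(void)p;
	*(size_t *)data += len;
	return 0;
}

int main(void)
{
	struct toybuf b = { "abcd", 4, "efghij", 6, "kl", 2 };
	size_t seen = 0;

	walk(&b, 2, 8, count, &seen);	/* skips "ab", covers "cd" + "efghij" */
	printf("actor saw %zu bytes\n", seen);	/* 8 */
	return 0;
}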
+
+static int
+checksummer(struct scatterlist *sg, void *data)
+{
+ struct crypto_tfm *tfm = (struct crypto_tfm *)data;
+
+ crypto_digest_update(tfm, sg, 1);
+
+ return 0;
+}
+
/* checksum the plaintext data and hdrlen bytes of the token header */
s32
make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
- struct xdr_netobj *cksum)
+ int body_offset, struct xdr_netobj *cksum)
{
char *cksumname;
struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */
struct scatterlist sg[1];
u32 code = GSS_S_FAILURE;
- int len, thislen, offset;
- int i;
switch (cksumtype) {
case CKSUMTYPE_RSA_MD5:
@@ -169,33 +243,8 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
crypto_digest_init(tfm);
buf_to_sg(sg, header, hdrlen);
crypto_digest_update(tfm, sg, 1);
- if (body->head[0].iov_len) {
- buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len);
- crypto_digest_update(tfm, sg, 1);
- }
-
- len = body->page_len;
- if (len != 0) {
- offset = body->page_base & (PAGE_CACHE_SIZE - 1);
- i = body->page_base >> PAGE_CACHE_SHIFT;
- thislen = PAGE_CACHE_SIZE - offset;
- do {
- if (thislen > len)
- thislen = len;
- sg->page = body->pages[i];
- sg->offset = offset;
- sg->length = thislen;
- crypto_digest_update(tfm, sg, 1);
- len -= thislen;
- i++;
- offset = 0;
- thislen = PAGE_CACHE_SIZE;
- } while(len != 0);
- }
- if (body->tail[0].iov_len) {
- buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len);
- crypto_digest_update(tfm, sg, 1);
- }
+ process_xdr_buf(body, body_offset, body->len - body_offset,
+ checksummer, tfm);
crypto_digest_final(tfm, cksum->data);
code = 0;
out:
@@ -204,3 +253,154 @@ out:
}
EXPORT_SYMBOL(make_checksum);
+
+struct encryptor_desc {
+ u8 iv[8]; /* XXX hard-coded blocksize */
+ struct crypto_tfm *tfm;
+ int pos;
+ struct xdr_buf *outbuf;
+ struct page **pages;
+ struct scatterlist infrags[4];
+ struct scatterlist outfrags[4];
+ int fragno;
+ int fraglen;
+};
+
+static int
+encryptor(struct scatterlist *sg, void *data)
+{
+ struct encryptor_desc *desc = data;
+ struct xdr_buf *outbuf = desc->outbuf;
+ struct page *in_page;
+ int thislen = desc->fraglen + sg->length;
+ int fraglen, ret;
+ int page_pos;
+
+ /* Worst case is 4 fragments: head, end of page 1, start
+ * of page 2, tail. Anything more is a bug. */
+ BUG_ON(desc->fragno > 3);
+ desc->infrags[desc->fragno] = *sg;
+ desc->outfrags[desc->fragno] = *sg;
+
+ page_pos = desc->pos - outbuf->head[0].iov_len;
+ if (page_pos >= 0 && page_pos < outbuf->page_len) {
+ /* pages are not in place: */
+ int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
+ in_page = desc->pages[i];
+ } else {
+ in_page = sg->page;
+ }
+ desc->infrags[desc->fragno].page = in_page;
+ desc->fragno++;
+ desc->fraglen += sg->length;
+ desc->pos += sg->length;
+
+ fraglen = thislen & 7; /* XXX hardcoded blocksize */
+ thislen -= fraglen;
+
+ if (thislen == 0)
+ return 0;
+
+ ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags,
+ thislen, desc->iv);
+ if (ret)
+ return ret;
+ if (fraglen) {
+ desc->outfrags[0].page = sg->page;
+ desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
+ desc->outfrags[0].length = fraglen;
+ desc->infrags[0] = desc->outfrags[0];
+ desc->infrags[0].page = in_page;
+ desc->fragno = 1;
+ desc->fraglen = fraglen;
+ } else {
+ desc->fragno = 0;
+ desc->fraglen = 0;
+ }
+ return 0;
+}
+
+int
+gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset,
+ struct page **pages)
+{
+ int ret;
+ struct encryptor_desc desc;
+
+ BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+
+ memset(desc.iv, 0, sizeof(desc.iv));
+ desc.tfm = tfm;
+ desc.pos = offset;
+ desc.outbuf = buf;
+ desc.pages = pages;
+ desc.fragno = 0;
+ desc.fraglen = 0;
+
+ ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
+ return ret;
+}
+
+EXPORT_SYMBOL(gss_encrypt_xdr_buf);
+
+struct decryptor_desc {
+ u8 iv[8]; /* XXX hard-coded blocksize */
+ struct crypto_tfm *tfm;
+ struct scatterlist frags[4];
+ int fragno;
+ int fraglen;
+};
+
+static int
+decryptor(struct scatterlist *sg, void *data)
+{
+ struct decryptor_desc *desc = data;
+ int thislen = desc->fraglen + sg->length;
+ int fraglen, ret;
+
+ /* Worst case is 4 fragments: head, end of page 1, start
+ * of page 2, tail. Anything more is a bug. */
+ BUG_ON(desc->fragno > 3);
+ desc->frags[desc->fragno] = *sg;
+ desc->fragno++;
+ desc->fraglen += sg->length;
+
+ fraglen = thislen & 7; /* XXX hardcoded blocksize */
+ thislen -= fraglen;
+
+ if (thislen == 0)
+ return 0;
+
+ ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags,
+ thislen, desc->iv);
+ if (ret)
+ return ret;
+ if (fraglen) {
+ desc->frags[0].page = sg->page;
+ desc->frags[0].offset = sg->offset + sg->length - fraglen;
+ desc->frags[0].length = fraglen;
+ desc->fragno = 1;
+ desc->fraglen = fraglen;
+ } else {
+ desc->fragno = 0;
+ desc->fraglen = 0;
+ }
+ return 0;
+}
+
+int
+gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset)
+{
+ struct decryptor_desc desc;
+
+ /* XXXJBF: */
+ BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
+
+ memset(desc.iv, 0, sizeof(desc.iv));
+ desc.tfm = tfm;
+ desc.fragno = 0;
+ desc.fraglen = 0;
+ return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
+}
+
+EXPORT_SYMBOL(gss_decrypt_xdr_buf);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 606a8a82caf..5f1f806a0b1 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -39,7 +39,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sunrpc/auth.h>
-#include <linux/in.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <linux/crypto.h>
@@ -191,43 +190,12 @@ gss_delete_sec_context_kerberos(void *internal_ctx) {
kfree(kctx);
}
-static u32
-gss_verify_mic_kerberos(struct gss_ctx *ctx,
- struct xdr_buf *message,
- struct xdr_netobj *mic_token,
- u32 *qstate) {
- u32 maj_stat = 0;
- int qop_state;
- struct krb5_ctx *kctx = ctx->internal_ctx_id;
-
- maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state,
- KG_TOK_MIC_MSG);
- if (!maj_stat && qop_state)
- *qstate = qop_state;
-
- dprintk("RPC: gss_verify_mic_kerberos returning %d\n", maj_stat);
- return maj_stat;
-}
-
-static u32
-gss_get_mic_kerberos(struct gss_ctx *ctx,
- u32 qop,
- struct xdr_buf *message,
- struct xdr_netobj *mic_token) {
- u32 err = 0;
- struct krb5_ctx *kctx = ctx->internal_ctx_id;
-
- err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG);
-
- dprintk("RPC: gss_get_mic_kerberos returning %d\n",err);
-
- return err;
-}
-
static struct gss_api_ops gss_kerberos_ops = {
.gss_import_sec_context = gss_import_sec_context_kerberos,
.gss_get_mic = gss_get_mic_kerberos,
.gss_verify_mic = gss_verify_mic_kerberos,
+ .gss_wrap = gss_wrap_kerberos,
+ .gss_unwrap = gss_unwrap_kerberos,
.gss_delete_sec_context = gss_delete_sec_context_kerberos,
};
@@ -242,6 +210,11 @@ static struct pf_desc gss_kerberos_pfs[] = {
.service = RPC_GSS_SVC_INTEGRITY,
.name = "krb5i",
},
+ [2] = {
+ .pseudoflavor = RPC_AUTH_GSS_KRB5P,
+ .service = RPC_GSS_SVC_PRIVACY,
+ .name = "krb5p",
+ },
};
static struct gss_api_mech gss_kerberos_mech = {
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index afeeb8715a7..13f8ae97945 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -70,22 +70,13 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-static inline int
-gss_krb5_padding(int blocksize, int length) {
- /* Most of the code is block-size independent but in practice we
- * use only 8: */
- BUG_ON(blocksize != 8);
- return 8 - (length & 7);
-}
-
u32
-krb5_make_token(struct krb5_ctx *ctx, int qop_req,
- struct xdr_buf *text, struct xdr_netobj *token,
- int toktype)
+gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
+ struct xdr_netobj *token)
{
+ struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
s32 checksum_type;
struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
- int blocksize = 0, tmsglen;
unsigned char *ptr, *krb5_hdr, *msg_start;
s32 now;
@@ -93,9 +84,6 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
now = get_seconds();
- if (qop_req != 0)
- goto out_err;
-
switch (ctx->signalg) {
case SGN_ALG_DES_MAC_MD5:
checksum_type = CKSUMTYPE_RSA_MD5;
@@ -111,21 +99,13 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
goto out_err;
}
- if (toktype == KG_TOK_WRAP_MSG) {
- blocksize = crypto_tfm_alg_blocksize(ctx->enc);
- tmsglen = blocksize + text->len
- + gss_krb5_padding(blocksize, blocksize + text->len);
- } else {
- tmsglen = 0;
- }
-
- token->len = g_token_size(&ctx->mech_used, 22 + tmsglen);
+ token->len = g_token_size(&ctx->mech_used, 22);
ptr = token->data;
- g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr);
+ g_make_token_header(&ctx->mech_used, 22, &ptr);
- *ptr++ = (unsigned char) ((toktype>>8)&0xff);
- *ptr++ = (unsigned char) (toktype&0xff);
+ *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff);
+ *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff);
/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
krb5_hdr = ptr - 2;
@@ -133,17 +113,9 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
*(u16 *)(krb5_hdr + 2) = htons(ctx->signalg);
memset(krb5_hdr + 4, 0xff, 4);
- if (toktype == KG_TOK_WRAP_MSG)
- *(u16 *)(krb5_hdr + 4) = htons(ctx->sealalg);
- if (toktype == KG_TOK_WRAP_MSG) {
- /* XXX removing support for now */
- goto out_err;
- } else { /* Sign only. */
- if (make_checksum(checksum_type, krb5_hdr, 8, text,
- &md5cksum))
+ if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
goto out_err;
- }
switch (ctx->signalg) {
case SGN_ALG_DES_MAC_MD5:
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 8767fc53183..2030475d98e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -68,21 +68,14 @@
#endif
-/* message_buffer is an input if toktype is MIC and an output if it is WRAP:
- * If toktype is MIC: read_token is a mic token, and message_buffer is the
- * data that the mic was supposedly taken over.
- * If toktype is WRAP: read_token is a wrap token, and message_buffer is used
- * to return the decrypted data.
- */
+/* read_token is a mic token, and message_buffer is the data that the mic was
+ * supposedly taken over. */
-/* XXX will need to change prototype and/or just split into a separate function
- * when we add privacy (because read_token will be in pages too). */
u32
-krb5_read_token(struct krb5_ctx *ctx,
- struct xdr_netobj *read_token,
- struct xdr_buf *message_buffer,
- int *qop_state, int toktype)
+gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
+ struct xdr_buf *message_buffer, struct xdr_netobj *read_token)
{
+ struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
int signalg;
int sealalg;
s32 checksum_type;
@@ -100,16 +93,12 @@ krb5_read_token(struct krb5_ctx *ctx,
read_token->len))
goto out;
- if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff)))
+ if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
+ (*ptr++ != ( KG_TOK_MIC_MSG &0xff)) )
goto out;
/* XXX sanity-check bodysize?? */
- if (toktype == KG_TOK_WRAP_MSG) {
- /* XXX gone */
- goto out;
- }
-
/* get the sign and seal algorithms */
signalg = ptr[0] + (ptr[1] << 8);
@@ -120,14 +109,7 @@ krb5_read_token(struct krb5_ctx *ctx,
if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
goto out;
- if (((toktype != KG_TOK_WRAP_MSG) && (sealalg != 0xffff)) ||
- ((toktype == KG_TOK_WRAP_MSG) && (sealalg == 0xffff)))
- goto out;
-
- /* in the current spec, there is only one valid seal algorithm per
- key type, so a simple comparison is ok */
-
- if ((toktype == KG_TOK_WRAP_MSG) && !(sealalg == ctx->sealalg))
+ if (sealalg != 0xffff)
goto out;
/* there are several mappings of seal algorithms to sign algorithms,
@@ -154,7 +136,7 @@ krb5_read_token(struct krb5_ctx *ctx,
switch (signalg) {
case SGN_ALG_DES_MAC_MD5:
ret = make_checksum(checksum_type, ptr - 2, 8,
- message_buffer, &md5cksum);
+ message_buffer, 0, &md5cksum);
if (ret)
goto out;
@@ -175,9 +157,6 @@ krb5_read_token(struct krb5_ctx *ctx,
/* it got through unscathed. Make sure the context is unexpired */
- if (qop_state)
- *qop_state = GSS_C_QOP_DEFAULT;
-
now = get_seconds();
ret = GSS_S_CONTEXT_EXPIRED;
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
new file mode 100644
index 00000000000..af777cf9f25
--- /dev/null
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -0,0 +1,363 @@
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/sunrpc/gss_krb5.h>
+#include <linux/random.h>
+#include <linux/pagemap.h>
+#include <asm/scatterlist.h>
+#include <linux/crypto.h>
+
+#ifdef RPC_DEBUG
+# define RPCDBG_FACILITY RPCDBG_AUTH
+#endif
+
+static inline int
+gss_krb5_padding(int blocksize, int length)
+{
+ /* Most of the code is block-size independent but currently we
+ * use only 8: */
+ BUG_ON(blocksize != 8);
+ return 8 - (length & 7);
+}
+
+static inline void
+gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
+{
+ int padding = gss_krb5_padding(blocksize, buf->len - offset);
+ char *p;
+ struct kvec *iov;
+
+ if (buf->page_len || buf->tail[0].iov_len)
+ iov = &buf->tail[0];
+ else
+ iov = &buf->head[0];
+ p = iov->iov_base + iov->iov_len;
+ iov->iov_len += padding;
+ buf->len += padding;
+ memset(p, padding, padding);
+}
+
+static inline int
+gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
+{
+ u8 *ptr;
+ u8 pad;
+ int len = buf->len;
+
+ if (len <= buf->head[0].iov_len) {
+ pad = *(u8 *)(buf->head[0].iov_base + len - 1);
+ if (pad > buf->head[0].iov_len)
+ return -EINVAL;
+ buf->head[0].iov_len -= pad;
+ goto out;
+ } else
+ len -= buf->head[0].iov_len;
+ if (len <= buf->page_len) {
+ int last = (buf->page_base + len - 1)
+ >>PAGE_CACHE_SHIFT;
+ int offset = (buf->page_base + len - 1)
+ & (PAGE_CACHE_SIZE - 1);
+ ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA);
+ pad = *(ptr + offset);
+ kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA);
+ goto out;
+ } else
+ len -= buf->page_len;
+ BUG_ON(len > buf->tail[0].iov_len);
+ pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
+out:
+ /* XXX: NOTE: we do not adjust the page lengths--they represent
+ * a range of data in the real filesystem page cache, and we need
+ * to know that range so the xdr code can properly place read data.
+ * However adjusting the head length, as we do above, is harmless.
+ * In the case of a request that fits into a single page, the server
+ * also uses length and head length together to determine the original
+ * start of the request to copy the request for deferral; so it's
+ * easier on the server if we adjust head and tail length in tandem.
+ * It's not really a problem that we don't fool with the page and
+ * tail lengths, though--at worst badly formed xdr might lead the
+ * server to attempt to parse the padding.
+ * XXX: Document all these weird requirements for gss mechanism
+ * wrap/unwrap functions. */
+ if (pad > blocksize)
+ return -EINVAL;
+ if (buf->len > pad)
+ buf->len -= pad;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static inline void
+make_confounder(char *p, int blocksize)
+{
+ static u64 i = 0;
+ u64 *q = (u64 *)p;
+
+ /* rfc1964 claims this should be "random". But all that's really
+ * necessary is that it be unique. And not even that is necessary in
+ * our case since our "gssapi" implementation exists only to support
+ * rpcsec_gss, so we know that the only buffers we will ever encrypt
+ * already begin with a unique sequence number. Just to hedge my bets
+ * I'll make a half-hearted attempt at something unique, but ensuring
+ * uniqueness would mean worrying about atomicity and rollover, and I
+ * don't care enough. */
+
+ BUG_ON(blocksize != 8);
+ *q = i++;
+}
+
+/* Assumptions: the head and tail of inbuf are ours to play with.
+ * The pages, however, may be real pages in the page cache and we replace
+ * them with scratch pages from **pages before writing to them. */
+/* XXX: obviously the above should be documentation of wrap interface,
+ * and shouldn't be in this kerberos-specific file. */
+
+/* XXX factor out common code with seal/unseal. */
+
+u32
+gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
+ struct xdr_buf *buf, struct page **pages)
+{
+ struct krb5_ctx *kctx = ctx->internal_ctx_id;
+ s32 checksum_type;
+ struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
+ int blocksize = 0, plainlen;
+ unsigned char *ptr, *krb5_hdr, *msg_start;
+ s32 now;
+ int headlen;
+ struct page **tmp_pages;
+
+ dprintk("RPC: gss_wrap_kerberos\n");
+
+ now = get_seconds();
+
+ switch (kctx->signalg) {
+ case SGN_ALG_DES_MAC_MD5:
+ checksum_type = CKSUMTYPE_RSA_MD5;
+ break;
+ default:
+ dprintk("RPC: gss_krb5_seal: kctx->signalg %d not"
+ " supported\n", kctx->signalg);
+ goto out_err;
+ }
+ if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
+ dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n",
+ kctx->sealalg);
+ goto out_err;
+ }
+
+ blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+ gss_krb5_add_padding(buf, offset, blocksize);
+ BUG_ON((buf->len - offset) % blocksize);
+ plainlen = blocksize + buf->len - offset;
+
+ headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
+ (buf->len - offset);
+
+ ptr = buf->head[0].iov_base + offset;
+ /* shift data to make room for header. */
+ /* XXX Would be cleverer to encrypt while copying. */
+ /* XXX bounds checking, slack, etc. */
+ memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
+ buf->head[0].iov_len += headlen;
+ buf->len += headlen;
+ BUG_ON((buf->len - offset - headlen) % blocksize);
+
+ g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);
+
+
+ *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
+ *ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff);
+
+ /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
+ krb5_hdr = ptr - 2;
+ msg_start = krb5_hdr + 24;
+ /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
+
+ *(u16 *)(krb5_hdr + 2) = htons(kctx->signalg);
+ memset(krb5_hdr + 4, 0xff, 4);
+ *(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
+
+ make_confounder(msg_start, blocksize);
+
+ /* XXXJBF: UGH!: */
+ tmp_pages = buf->pages;
+ buf->pages = pages;
+ if (make_checksum(checksum_type, krb5_hdr, 8, buf,
+ offset + headlen - blocksize, &md5cksum))
+ goto out_err;
+ buf->pages = tmp_pages;
+
+ switch (kctx->signalg) {
+ case SGN_ALG_DES_MAC_MD5:
+ if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+ md5cksum.data, md5cksum.len))
+ goto out_err;
+ memcpy(krb5_hdr + 16,
+ md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+ KRB5_CKSUM_LENGTH);
+
+ dprintk("RPC: make_seal_token: cksum data: \n");
+ print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
+ break;
+ default:
+ BUG();
+ }
+
+ kfree(md5cksum.data);
+
+ /* XXX would probably be more efficient to compute checksum
+ * and encrypt at the same time: */
+ if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
+ kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8)))
+ goto out_err;
+
+ if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
+ pages))
+ goto out_err;
+
+ kctx->seq_send++;
+
+ return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
+out_err:
+ if (md5cksum.data) kfree(md5cksum.data);
+ return GSS_S_FAILURE;
+}
+
+u32
+gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
+{
+ struct krb5_ctx *kctx = ctx->internal_ctx_id;
+ int signalg;
+ int sealalg;
+ s32 checksum_type;
+ struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
+ s32 now;
+ int direction;
+ s32 seqnum;
+ unsigned char *ptr;
+ int bodysize;
+ u32 ret = GSS_S_DEFECTIVE_TOKEN;
+ void *data_start, *orig_start;
+ int data_len;
+ int blocksize;
+
+ dprintk("RPC: gss_unwrap_kerberos\n");
+
+ ptr = (u8 *)buf->head[0].iov_base + offset;
+ if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
+ buf->len - offset))
+ goto out;
+
+ if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
+ (*ptr++ != (KG_TOK_WRAP_MSG &0xff)) )
+ goto out;
+
+ /* XXX sanity-check bodysize?? */
+
+ /* get the sign and seal algorithms */
+
+ signalg = ptr[0] + (ptr[1] << 8);
+ sealalg = ptr[2] + (ptr[3] << 8);
+
+ /* Sanity checks */
+
+ if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
+ goto out;
+
+ if (sealalg == 0xffff)
+ goto out;
+
+ /* in the current spec, there is only one valid seal algorithm per
+ key type, so a simple comparison is ok */
+
+ if (sealalg != kctx->sealalg)
+ goto out;
+
+ /* there are several mappings of seal algorithms to sign algorithms,
+ but few enough that we can try them all. */
+
+ if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
+ (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
+ (kctx->sealalg == SEAL_ALG_DES3KD &&
+ signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
+ goto out;
+
+ if (gss_decrypt_xdr_buf(kctx->enc, buf,
+ ptr + 22 - (unsigned char *)buf->head[0].iov_base))
+ goto out;
+
+ /* compute the checksum of the message */
+
+ /* initialize the cksum */
+ switch (signalg) {
+ case SGN_ALG_DES_MAC_MD5:
+ checksum_type = CKSUMTYPE_RSA_MD5;
+ break;
+ default:
+ ret = GSS_S_DEFECTIVE_TOKEN;
+ goto out;
+ }
+
+ switch (signalg) {
+ case SGN_ALG_DES_MAC_MD5:
+ ret = make_checksum(checksum_type, ptr - 2, 8, buf,
+ ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
+ if (ret)
+ goto out;
+
+ ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+ md5cksum.data, md5cksum.len);
+ if (ret)
+ goto out;
+
+ if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
+ ret = GSS_S_BAD_SIG;
+ goto out;
+ }
+ break;
+ default:
+ ret = GSS_S_DEFECTIVE_TOKEN;
+ goto out;
+ }
+
+ /* it got through unscathed. Make sure the context is unexpired */
+
+ now = get_seconds();
+
+ ret = GSS_S_CONTEXT_EXPIRED;
+ if (now > kctx->endtime)
+ goto out;
+
+ /* do sequencing checks */
+
+ ret = GSS_S_BAD_SIG;
+ if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
+ &seqnum)))
+ goto out;
+
+ if ((kctx->initiate && direction != 0xff) ||
+ (!kctx->initiate && direction != 0))
+ goto out;
+
+ /* Copy the data back to the right position. XXX: Would probably be
+ * better to copy and encrypt at the same time. */
+
+ blocksize = crypto_tfm_alg_blocksize(kctx->enc);
+ data_start = ptr + 22 + blocksize;
+ orig_start = buf->head[0].iov_base + offset;
+ data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
+ memmove(orig_start, data_start, data_len);
+ buf->head[0].iov_len -= (data_start - orig_start);
+ buf->len -= (data_start - orig_start);
+
+ ret = GSS_S_DEFECTIVE_TOKEN;
+ if (gss_krb5_remove_padding(buf, blocksize))
+ goto out;
+
+ ret = GSS_S_COMPLETE;
+out:
+ if (md5cksum.data) kfree(md5cksum.data);
+ return ret;
+}
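
Illustrative sketch, not part of the patch: the new gss_krb5_wrap.c above pads the plaintext out to the 8-byte DES block size and stores the pad count in every pad byte, so the unwrap side can recover the original length from the final byte. A minimal user-space round trip of that padding arithmetic (the function names here are hypothetical; only the arithmetic mirrors gss_krb5_padding/add_padding/remove_padding):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Mirrors gss_krb5_padding(): pad out to an 8-byte block, 1..8 bytes of pad. */
static int pad_len(int blocksize, int length)
{
	assert(blocksize == 8);
	return 8 - (length & 7);
}

/* Append 'pad' bytes, each holding the pad count (as gss_krb5_add_padding does). */
static int add_padding(unsigned char *buf, int len)
{
	int pad = pad_len(8, len);

	memset(buf + len, pad, pad);
	return len + pad;
}

/* Recover the original length from the last byte (as gss_krb5_remove_padding does). */
static int remove_padding(const unsigned char *buf, int len)
{
	int pad = buf[len - 1];

	if (pad < 1 || pad > 8 || pad > len)
		return -1;
	return len - pad;
}

int main(void)
{
	unsigned char buf[32] = "hello";	/* 5 payload bytes */
	int padded = add_padding(buf, 5);

	printf("padded length: %d\n", padded);			/* 8 */
	printf("original length: %d\n", remove_padding(buf, padded)); /* 5 */
	return 0;
}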
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 9dfb68377d6..b048bf672da 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -35,7 +35,6 @@
#include <linux/types.h>
#include <linux/slab.h>
-#include <linux/socket.h>
#include <linux/module.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/gss_asn1.h>
@@ -251,13 +250,11 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
u32
gss_get_mic(struct gss_ctx *context_handle,
- u32 qop,
struct xdr_buf *message,
struct xdr_netobj *mic_token)
{
return context_handle->mech_type->gm_ops
->gss_get_mic(context_handle,
- qop,
message,
mic_token);
}
@@ -267,16 +264,34 @@ gss_get_mic(struct gss_ctx *context_handle,
u32
gss_verify_mic(struct gss_ctx *context_handle,
struct xdr_buf *message,
- struct xdr_netobj *mic_token,
- u32 *qstate)
+ struct xdr_netobj *mic_token)
{
return context_handle->mech_type->gm_ops
->gss_verify_mic(context_handle,
message,
- mic_token,
- qstate);
+ mic_token);
}
+u32
+gss_wrap(struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *buf,
+ struct page **inpages)
+{
+ return ctx_id->mech_type->gm_ops
+ ->gss_wrap(ctx_id, offset, buf, inpages);
+}
+
+u32
+gss_unwrap(struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *buf)
+{
+ return ctx_id->mech_type->gm_ops
+ ->gss_unwrap(ctx_id, offset, buf);
+}
+
+
/* gss_delete_sec_context: free all resources associated with context_handle.
* Note this differs from the RFC 2744-specified prototype in that we don't
* bother returning an output token, since it would never be used anyway. */
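
Illustrative sketch, not part of the patch: gss_wrap() and gss_unwrap() above are thin dispatchers through the mechanism's gm_ops function-pointer table, the same pattern each mechanism (krb5, spkm3) plugs into. A minimal stand-alone C analogue of that dispatch (all type and function names here are hypothetical):

#include <stdio.h>

/* Cut-down stand-in for struct gss_api_ops: one slot per GSS operation. */
struct mech_ops {
	int (*wrap)(void *ctx, const char *msg);
	int (*unwrap)(void *ctx, const char *msg);
};

struct mech_ctx {
	const struct mech_ops *ops;	/* like gss_ctx->mech_type->gm_ops */
	void *internal;			/* like gss_ctx->internal_ctx_id */
};

static int demo_wrap(void *ctx, const char *msg)
{
	(void)ctx;
	printf("wrap: %s\n", msg);
	return 0;
}

static int demo_unwrap(void *ctx, const char *msg)
{
	(void)ctx;
	printf("unwrap: %s\n", msg);
	return 0;
}

static const struct mech_ops demo_ops = {
	.wrap	= demo_wrap,
	.unwrap	= demo_unwrap,
};

/* Generic entry points: dispatch without knowing which mechanism is loaded. */
static int mech_wrap(struct mech_ctx *ctx, const char *msg)
{
	return ctx->ops->wrap(ctx->internal, msg);
}

static int mech_unwrap(struct mech_ctx *ctx, const char *msg)
{
	return ctx->ops->unwrap(ctx->internal, msg);
}

int main(void)
{
	struct mech_ctx ctx = { .ops = &demo_ops, .internal = NULL };

	mech_wrap(&ctx, "payload");
	mech_unwrap(&ctx, "payload");
	return 0;
}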
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 6c97d61baa9..39b3edc1469 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -224,18 +224,13 @@ gss_delete_sec_context_spkm3(void *internal_ctx) {
static u32
gss_verify_mic_spkm3(struct gss_ctx *ctx,
struct xdr_buf *signbuf,
- struct xdr_netobj *checksum,
- u32 *qstate) {
+ struct xdr_netobj *checksum)
+{
u32 maj_stat = 0;
- int qop_state = 0;
struct spkm3_ctx *sctx = ctx->internal_ctx_id;
dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n");
- maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state,
- SPKM_MIC_TOK);
-
- if (!maj_stat && qop_state)
- *qstate = qop_state;
+ maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
return maj_stat;
@@ -243,15 +238,15 @@ gss_verify_mic_spkm3(struct gss_ctx *ctx,
static u32
gss_get_mic_spkm3(struct gss_ctx *ctx,
- u32 qop,
struct xdr_buf *message_buffer,
- struct xdr_netobj *message_token) {
+ struct xdr_netobj *message_token)
+{
u32 err = 0;
struct spkm3_ctx *sctx = ctx->internal_ctx_id;
dprintk("RPC: gss_get_mic_spkm3\n");
- err = spkm3_make_token(sctx, qop, message_buffer,
+ err = spkm3_make_token(sctx, message_buffer,
message_token, SPKM_MIC_TOK);
return err;
}
@@ -264,8 +259,8 @@ static struct gss_api_ops gss_spkm3_ops = {
};
static struct pf_desc gss_spkm3_pfs[] = {
- {RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"},
- {RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
+ {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"},
+ {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
};
static struct gss_api_mech gss_spkm3_mech = {
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index 25339868d46..148201e929d 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -51,7 +51,7 @@
*/
u32
-spkm3_make_token(struct spkm3_ctx *ctx, int qop_req,
+spkm3_make_token(struct spkm3_ctx *ctx,
struct xdr_buf * text, struct xdr_netobj * token,
int toktype)
{
@@ -68,8 +68,6 @@ spkm3_make_token(struct spkm3_ctx *ctx, int qop_req,
dprintk("RPC: spkm3_make_token\n");
now = jiffies;
- if (qop_req != 0)
- goto out_err;
if (ctx->ctx_id.len != 16) {
dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index 65ce81bf0bc..c3c0d958610 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -52,7 +52,7 @@ u32
spkm3_read_token(struct spkm3_ctx *ctx,
struct xdr_netobj *read_token, /* checksum */
struct xdr_buf *message_buffer, /* signbuf */
- int *qop_state, int toktype)
+ int toktype)
{
s32 code;
struct xdr_netobj wire_cksum = {.len =0, .data = NULL};
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e3308195374..e4ada15ed85 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -566,8 +566,7 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
if (rqstp->rq_deferred) /* skip verification of revisited request */
return SVC_OK;
- if (gss_verify_mic(ctx_id, &rpchdr, &checksum, NULL)
- != GSS_S_COMPLETE) {
+ if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) {
*authp = rpcsec_gsserr_credproblem;
return SVC_DENIED;
}
@@ -604,7 +603,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
xdr_buf_from_iov(&iov, &verf_data);
p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
mic.data = (u8 *)(p + 1);
- maj_stat = gss_get_mic(ctx_id, 0, &verf_data, &mic);
+ maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
if (maj_stat != GSS_S_COMPLETE)
return -1;
*p++ = htonl(mic.len);
@@ -710,7 +709,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
goto out;
if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
goto out;
- maj_stat = gss_verify_mic(ctx, &integ_buf, &mic, NULL);
+ maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
if (maj_stat != GSS_S_COMPLETE)
goto out;
if (ntohl(svc_getu32(&buf->head[0])) != seq)
@@ -1012,7 +1011,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
resv = &resbuf->tail[0];
}
mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
- if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic))
+ if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
goto out_err;
svc_putu32(resv, htonl(mic.len));
memset(mic.data + mic.len, 0,
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 9b72d3abf82..f56767aaa92 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -7,9 +7,7 @@
*/
#include <linux/types.h>
-#include <linux/socket.h>
#include <linux/module.h>
-#include <linux/in.h>
#include <linux/utsname.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sched.h>
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4ff297a9b15..890fb5ea0dc 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -9,8 +9,6 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/socket.h>
-#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f17e6153b68..702ede309b0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1,5 +1,5 @@
/*
- * linux/net/sunrpc/rpcclnt.c
+ * linux/net/sunrpc/clnt.c
*
* This file contains the high-level RPC interface.
* It is modeled as a finite state machine to support both synchronous
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/in.h>
#include <linux/utsname.h>
#include <linux/sunrpc/clnt.h>
@@ -53,6 +52,7 @@ static void call_allocate(struct rpc_task *task);
static void call_encode(struct rpc_task *task);
static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
+static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
static void call_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
@@ -517,15 +517,8 @@ void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
struct rpc_xprt *xprt = clnt->cl_xprt;
-
- xprt->sndsize = 0;
- if (sndsize)
- xprt->sndsize = sndsize + RPC_SLACK_SPACE;
- xprt->rcvsize = 0;
- if (rcvsize)
- xprt->rcvsize = rcvsize + RPC_SLACK_SPACE;
- if (xprt_connected(xprt))
- xprt_sock_setbufsize(xprt);
+ if (xprt->ops->set_buffer_size)
+ xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
/*
@@ -685,13 +678,11 @@ call_allocate(struct rpc_task *task)
static void
call_encode(struct rpc_task *task)
{
- struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
struct xdr_buf *sndbuf = &req->rq_snd_buf;
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
unsigned int bufsiz;
kxdrproc_t encode;
- int status;
u32 *p;
dprintk("RPC: %4d call_encode (status %d)\n",
@@ -719,11 +710,15 @@ call_encode(struct rpc_task *task)
rpc_exit(task, -EIO);
return;
}
- if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
- task->tk_msg.rpc_argp)) < 0) {
- printk(KERN_WARNING "%s: can't encode arguments: %d\n",
- clnt->cl_protname, -status);
- rpc_exit(task, status);
+ if (encode == NULL)
+ return;
+
+ task->tk_status = rpcauth_wrap_req(task, encode, req, p,
+ task->tk_msg.rpc_argp);
+ if (task->tk_status == -ENOMEM) {
+ /* XXX: Is this sane? */
+ rpc_delay(task, 3*HZ);
+ task->tk_status = -EAGAIN;
}
}
@@ -734,43 +729,95 @@ static void
call_bind(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
- struct rpc_xprt *xprt = clnt->cl_xprt;
-
- dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid,
- xprt, (xprt_connected(xprt) ? "is" : "is not"));
- task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect;
+ dprintk("RPC: %4d call_bind (status %d)\n",
+ task->tk_pid, task->tk_status);
+ task->tk_action = call_connect;
if (!clnt->cl_port) {
- task->tk_action = call_connect;
- task->tk_timeout = RPC_CONNECT_TIMEOUT;
+ task->tk_action = call_bind_status;
+ task->tk_timeout = task->tk_xprt->bind_timeout;
rpc_getport(task, clnt);
}
}
/*
- * 4a. Connect to the RPC server (TCP case)
+ * 4a. Sort out bind result
+ */
+static void
+call_bind_status(struct rpc_task *task)
+{
+ int status = -EACCES;
+
+ if (task->tk_status >= 0) {
+ dprintk("RPC: %4d call_bind_status (status %d)\n",
+ task->tk_pid, task->tk_status);
+ task->tk_status = 0;
+ task->tk_action = call_connect;
+ return;
+ }
+
+ switch (task->tk_status) {
+ case -EACCES:
+ dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
+ task->tk_pid);
+ rpc_delay(task, 3*HZ);
+ goto retry_bind;
+ case -ETIMEDOUT:
+ dprintk("RPC: %4d rpcbind request timed out\n",
+ task->tk_pid);
+ if (RPC_IS_SOFT(task)) {
+ status = -EIO;
+ break;
+ }
+ goto retry_bind;
+ case -EPFNOSUPPORT:
+ dprintk("RPC: %4d remote rpcbind service unavailable\n",
+ task->tk_pid);
+ break;
+ case -EPROTONOSUPPORT:
+ dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
+ task->tk_pid);
+ break;
+ default:
+ dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
+ task->tk_pid, -task->tk_status);
+ status = -EIO;
+ break;
+ }
+
+ rpc_exit(task, status);
+ return;
+
+retry_bind:
+ task->tk_status = 0;
+ task->tk_action = call_bind;
+ return;
+}
+
+/*
+ * 4b. Connect to the RPC server
*/
static void
call_connect(struct rpc_task *task)
{
- struct rpc_clnt *clnt = task->tk_client;
+ struct rpc_xprt *xprt = task->tk_xprt;
- dprintk("RPC: %4d call_connect status %d\n",
- task->tk_pid, task->tk_status);
+ dprintk("RPC: %4d call_connect xprt %p %s connected\n",
+ task->tk_pid, xprt,
+ (xprt_connected(xprt) ? "is" : "is not"));
- if (xprt_connected(clnt->cl_xprt)) {
- task->tk_action = call_transmit;
- return;
+ task->tk_action = call_transmit;
+ if (!xprt_connected(xprt)) {
+ task->tk_action = call_connect_status;
+ if (task->tk_status < 0)
+ return;
+ xprt_connect(task);
}
- task->tk_action = call_connect_status;
- if (task->tk_status < 0)
- return;
- xprt_connect(task);
}
/*
- * 4b. Sort out connect result
+ * 4c. Sort out connect result
*/
static void
call_connect_status(struct rpc_task *task)
@@ -778,6 +825,9 @@ call_connect_status(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
+ dprintk("RPC: %5u call_connect_status (status %d)\n",
+ task->tk_pid, task->tk_status);
+
task->tk_status = 0;
if (status >= 0) {
clnt->cl_stats->netreconn++;
@@ -785,17 +835,19 @@ call_connect_status(struct rpc_task *task)
return;
}
- /* Something failed: we may have to rebind */
+ /* Something failed: remote service port may have changed */
if (clnt->cl_autobind)
clnt->cl_port = 0;
+
switch (status) {
case -ENOTCONN:
case -ETIMEDOUT:
case -EAGAIN:
- task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect;
+ task->tk_action = call_bind;
break;
default:
rpc_exit(task, -EIO);
+ break;
}
}
@@ -815,10 +867,12 @@ call_transmit(struct rpc_task *task)
if (task->tk_status != 0)
return;
/* Encode here so that rpcsec_gss can use correct sequence number. */
- if (!task->tk_rqstp->rq_bytes_sent)
+ if (task->tk_rqstp->rq_bytes_sent == 0) {
call_encode(task);
- if (task->tk_status < 0)
- return;
+ /* Did the encode result in an error condition? */
+ if (task->tk_status != 0)
+ goto out_nosend;
+ }
xprt_transmit(task);
if (task->tk_status < 0)
return;
@@ -826,6 +880,10 @@ call_transmit(struct rpc_task *task)
task->tk_action = NULL;
rpc_wake_up_task(task);
}
+ return;
+out_nosend:
+ /* release socket write lock before attempting to handle error */
+ xprt_abort_transmit(task);
}
/*
@@ -1020,13 +1078,12 @@ static u32 *
call_header(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
- struct rpc_xprt *xprt = clnt->cl_xprt;
struct rpc_rqst *req = task->tk_rqstp;
u32 *p = req->rq_svec[0].iov_base;
/* FIXME: check buffer size? */
- if (xprt->stream)
- *p++ = 0; /* fill in later */
+
+ p = xprt_skip_transport_header(task->tk_xprt, p);
*p++ = req->rq_xid; /* XID */
*p++ = htonl(RPC_CALL); /* CALL */
*p++ = htonl(RPC_VERSION); /* RPC version */
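
Illustrative sketch, not part of the patch: the new call_bind_status() above classifies rpcbind results into three outcomes: retry after a delay (EACCES), retry immediately unless the task is "soft" (timeouts), or give up with a hard error. A compact stand-alone C rendering of that decision table (the enum and helper names are hypothetical):

#include <errno.h>
#include <stdio.h>

enum bind_action { BIND_RETRY_DELAY, BIND_RETRY, BIND_FAIL_EIO, BIND_FAIL_EACCES };

/* Mirrors the switch in call_bind_status(): soft tasks give up on timeout. */
static enum bind_action classify_bind_status(int status, int is_soft_task)
{
	switch (status) {
	case -EACCES:		/* program/version not registered: wait, then rebind */
		return BIND_RETRY_DELAY;
	case -ETIMEDOUT:	/* rpcbind request timed out */
		return is_soft_task ? BIND_FAIL_EIO : BIND_RETRY;
	case -EPFNOSUPPORT:	/* no rpcbind service at all */
	case -EPROTONOSUPPORT:	/* rpcbind version 2 unavailable */
		return BIND_FAIL_EACCES;
	default:		/* anything unrecognized is a hard failure */
		return BIND_FAIL_EIO;
	}
}

int main(void)
{
	printf("%d\n", classify_bind_status(-EACCES, 0));	/* retry after delay */
	printf("%d\n", classify_bind_status(-ETIMEDOUT, 1));	/* soft task: fail */
	printf("%d\n", classify_bind_status(-EPFNOSUPPORT, 0));	/* fail */
	return 0;
}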
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index 4e81f276692..a398575f94b 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -26,7 +26,7 @@
#define PMAP_GETPORT 3
static struct rpc_procinfo pmap_procedures[];
-static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int);
+static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int, int);
static void pmap_getport_done(struct rpc_task *);
static struct rpc_program pmap_program;
static DEFINE_SPINLOCK(pmap_lock);
@@ -65,7 +65,7 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
map->pm_binding = 1;
spin_unlock(&pmap_lock);
- pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot);
+ pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0);
if (IS_ERR(pmap_clnt)) {
task->tk_status = PTR_ERR(pmap_clnt);
goto bailout;
@@ -112,7 +112,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);
sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr));
- pmap_clnt = pmap_create(hostname, sin, prot);
+ pmap_clnt = pmap_create(hostname, sin, prot, 0);
if (IS_ERR(pmap_clnt))
return PTR_ERR(pmap_clnt);
@@ -171,7 +171,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP);
+ pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
if (IS_ERR(pmap_clnt)) {
error = PTR_ERR(pmap_clnt);
dprintk("RPC: couldn't create pmap client. Error = %d\n", error);
@@ -198,7 +198,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
}
static struct rpc_clnt *
-pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto)
+pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
{
struct rpc_xprt *xprt;
struct rpc_clnt *clnt;
@@ -208,6 +208,8 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto)
if (IS_ERR(xprt))
return (struct rpc_clnt *)xprt;
xprt->addr.sin_port = htons(RPC_PMAP_PORT);
+ if (!privileged)
+ xprt->resvport = 0;
/* printk("pmap: create clnt\n"); */
clnt = rpc_new_client(xprt, hostname,
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index ded6c63f11e..4f188d0a5d1 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -76,25 +76,35 @@ int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
struct rpc_inode *rpci = RPC_I(inode);
- int res = 0;
+ int res = -EPIPE;
down(&inode->i_sem);
+ if (rpci->ops == NULL)
+ goto out;
if (rpci->nreaders) {
list_add_tail(&msg->list, &rpci->pipe);
rpci->pipelen += msg->len;
+ res = 0;
} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
if (list_empty(&rpci->pipe))
schedule_delayed_work(&rpci->queue_timeout,
RPC_UPCALL_TIMEOUT);
list_add_tail(&msg->list, &rpci->pipe);
rpci->pipelen += msg->len;
- } else
- res = -EPIPE;
+ res = 0;
+ }
+out:
up(&inode->i_sem);
wake_up(&rpci->waitq);
return res;
}
+static inline void
+rpc_inode_setowner(struct inode *inode, void *private)
+{
+ RPC_I(inode)->private = private;
+}
+
static void
rpc_close_pipes(struct inode *inode)
{
@@ -111,15 +121,10 @@ rpc_close_pipes(struct inode *inode)
rpci->ops->release_pipe(inode);
rpci->ops = NULL;
}
+ rpc_inode_setowner(inode, NULL);
up(&inode->i_sem);
}
-static inline void
-rpc_inode_setowner(struct inode *inode, void *private)
-{
- RPC_I(inode)->private = private;
-}
-
static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
@@ -501,7 +506,6 @@ repeat:
dentry = dvec[--n];
if (dentry->d_inode) {
rpc_close_pipes(dentry->d_inode);
- rpc_inode_setowner(dentry->d_inode, NULL);
simple_unlink(dir, dentry);
}
dput(dentry);
@@ -576,10 +580,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
int error;
shrink_dcache_parent(dentry);
- if (dentry->d_inode) {
+ if (dentry->d_inode)
rpc_close_pipes(dentry->d_inode);
- rpc_inode_setowner(dentry->d_inode, NULL);
- }
if ((error = simple_rmdir(dir, dentry)) != 0)
return error;
if (!error) {
@@ -732,7 +734,6 @@ rpc_unlink(char *path)
d_drop(dentry);
if (dentry->d_inode) {
rpc_close_pipes(dentry->d_inode);
- rpc_inode_setowner(dentry->d_inode, NULL);
error = simple_unlink(dir, dentry);
}
dput(dentry);
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
new file mode 100644
index 00000000000..8f97e90f36c
--- /dev/null
+++ b/net/sunrpc/socklib.c
@@ -0,0 +1,175 @@
+/*
+ * linux/net/sunrpc/socklib.c
+ *
+ * Common socket helper routines for RPC client and server
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/udp.h>
+#include <linux/sunrpc/xdr.h>
+
+
+/**
+ * skb_read_bits - copy some data bits from skb to internal buffer
+ * @desc: sk_buff copy helper
+ * @to: copy destination
+ * @len: number of bytes to copy
+ *
+ * Possibly called several times to iterate over an sk_buff and copy
+ * data out of it.
+ */
+static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len)
+{
+ if (len > desc->count)
+ len = desc->count;
+ if (skb_copy_bits(desc->skb, desc->offset, to, len))
+ return 0;
+ desc->count -= len;
+ desc->offset += len;
+ return len;
+}
+
+/**
+ * skb_read_and_csum_bits - copy and checksum from skb to buffer
+ * @desc: sk_buff copy helper
+ * @to: copy destination
+ * @len: number of bytes to copy
+ *
+ * Same as skb_read_bits, but calculate a checksum at the same time.
+ */
+static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
+{
+ unsigned int csum2, pos;
+
+ if (len > desc->count)
+ len = desc->count;
+ pos = desc->offset;
+ csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
+ desc->csum = csum_block_add(desc->csum, csum2, pos);
+ desc->count -= len;
+ desc->offset += len;
+ return len;
+}
+
+/**
+ * xdr_partial_copy_from_skb - copy data out of an skb
+ * @xdr: target XDR buffer
+ * @base: starting offset
+ * @desc: sk_buff copy helper
+ * @copy_actor: virtual method for copying data
+ *
+ */
+ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor)
+{
+ struct page **ppage = xdr->pages;
+ unsigned int len, pglen = xdr->page_len;
+ ssize_t copied = 0;
+ int ret;
+
+ len = xdr->head[0].iov_len;
+ if (base < len) {
+ len -= base;
+ ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
+ copied += ret;
+ if (ret != len || !desc->count)
+ goto out;
+ base = 0;
+ } else
+ base -= len;
+
+ if (unlikely(pglen == 0))
+ goto copy_tail;
+ if (unlikely(base >= pglen)) {
+ base -= pglen;
+ goto copy_tail;
+ }
+ if (base || xdr->page_base) {
+ pglen -= base;
+ base += xdr->page_base;
+ ppage += base >> PAGE_CACHE_SHIFT;
+ base &= ~PAGE_CACHE_MASK;
+ }
+ do {
+ char *kaddr;
+
+ /* ACL likes to be lazy in allocating pages - ACLs
+ * are small by default but can get huge. */
+ if (unlikely(*ppage == NULL)) {
+ *ppage = alloc_page(GFP_ATOMIC);
+ if (unlikely(*ppage == NULL)) {
+ if (copied == 0)
+ copied = -ENOMEM;
+ goto out;
+ }
+ }
+
+ len = PAGE_CACHE_SIZE;
+ kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
+ if (base) {
+ len -= base;
+ if (pglen < len)
+ len = pglen;
+ ret = copy_actor(desc, kaddr + base, len);
+ base = 0;
+ } else {
+ if (pglen < len)
+ len = pglen;
+ ret = copy_actor(desc, kaddr, len);
+ }
+ flush_dcache_page(*ppage);
+ kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
+ copied += ret;
+ if (ret != len || !desc->count)
+ goto out;
+ ppage++;
+ } while ((pglen -= len) != 0);
+copy_tail:
+ len = xdr->tail[0].iov_len;
+ if (base < len)
+ copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
+out:
+ return copied;
+}
+
+/**
+ * csum_partial_copy_to_xdr - checksum and copy data
+ * @xdr: target XDR buffer
+ * @skb: source skb
+ *
+ * We have set things up such that we perform the checksum of the UDP
+ * packet in parallel with the copies into the RPC client iovec. -DaveM
+ */
+int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
+{
+ skb_reader_t desc;
+
+ desc.skb = skb;
+ desc.offset = sizeof(struct udphdr);
+ desc.count = skb->len - desc.offset;
+
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ goto no_checksum;
+
+ desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
+ if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
+ return -1;
+ if (desc.offset != skb->len) {
+ unsigned int csum2;
+ csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
+ desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
+ }
+ if (desc.count)
+ return -1;
+ if ((unsigned short)csum_fold(desc.csum))
+ return -1;
+ return 0;
+no_checksum:
+ if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
+ return -1;
+ if (desc.count)
+ return -1;
+ return 0;
+}
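
Illustrative sketch, not part of the patch: socklib.c above drives every copy out of an skb through a "copy actor" callback, so the same xdr_partial_copy_from_skb() loop can either copy plainly (skb_read_bits) or copy and checksum in one pass (skb_read_and_csum_bits). A minimal stand-alone C version of that callback-driven copy, with a flat buffer standing in for the skb and a toy byte-sum in place of the real checksum (all names hypothetical):

#include <stdio.h>
#include <string.h>

/* Stand-in for skb_reader_t: a source buffer plus a cursor. */
struct reader {
	const unsigned char *data;
	size_t offset;
	size_t count;		/* bytes still available */
	unsigned long sum;	/* toy checksum accumulator */
};

typedef size_t (*read_actor_t)(struct reader *, void *, size_t);

/* Like skb_read_bits(): plain copy, no checksum. */
static size_t read_bits(struct reader *r, void *to, size_t len)
{
	if (len > r->count)
		len = r->count;
	memcpy(to, r->data + r->offset, len);
	r->offset += len;
	r->count -= len;
	return len;
}

/* Like skb_read_and_csum_bits(): copy and fold the bytes into a running sum. */
static size_t read_and_csum_bits(struct reader *r, void *to, size_t len)
{
	size_t i, n = read_bits(r, to, len);

	for (i = 0; i < n; i++)
		r->sum += ((unsigned char *)to)[i];
	return n;
}

/* Like xdr_partial_copy_from_skb(): fill head then tail via the chosen actor. */
static size_t copy_from_reader(struct reader *r, read_actor_t actor,
			       void *head, size_t headlen,
			       void *tail, size_t taillen)
{
	size_t copied = actor(r, head, headlen);

	if (copied == headlen && r->count)
		copied += actor(r, tail, taillen);
	return copied;
}

int main(void)
{
	const unsigned char packet[] = "0123456789abcdef";
	struct reader r = { .data = packet, .count = sizeof(packet) - 1 };
	char head[8], tail[8];
	size_t n;

	n = copy_from_reader(&r, read_and_csum_bits,
			     head, sizeof(head), tail, sizeof(tail));
	printf("copied %zu bytes, sum %lu\n", n, r.sum);
	return 0;
}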
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index ed48ff022d3..2387e7b823f 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/unistd.h>
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 30ec3efc48a..f16e7cdd615 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -548,9 +548,6 @@ svc_write_space(struct sock *sk)
/*
* Receive a datagram from a UDP socket.
*/
-extern int
-csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb);
-
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
@@ -587,7 +584,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
struct timeval tv;
tv.tv_sec = xtime.tv_sec;
- tv.tv_usec = xtime.tv_nsec * 1000;
+ tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
skb_set_timestamp(skb, &tv);
/* Don't enable netstamp, sunrpc doesn't
need that much accuracy */
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 1b9616a12e2..d0c9f460e41 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -119,8 +119,18 @@ done:
return 0;
}
+unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
+unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
+unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
+EXPORT_SYMBOL(xprt_min_resvport);
+unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
+EXPORT_SYMBOL(xprt_max_resvport);
+
+
static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
+static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
+static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
static ctl_table debug_table[] = {
{
@@ -177,6 +187,28 @@ static ctl_table debug_table[] = {
.extra1 = &min_slot_table_size,
.extra2 = &max_slot_table_size
},
+ {
+ .ctl_name = CTL_MIN_RESVPORT,
+ .procname = "min_resvport",
+ .data = &xprt_min_resvport,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &xprt_min_resvport_limit,
+ .extra2 = &xprt_max_resvport_limit
+ },
+ {
+ .ctl_name = CTL_MAX_RESVPORT,
+ .procname = "max_resvport",
+ .data = &xprt_max_resvport,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &xprt_min_resvport_limit,
+ .extra2 = &xprt_max_resvport_limit
+ },
{ .ctl_name = 0 }
};
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index fde16f40a58..32df43372ee 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -6,15 +6,12 @@
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
+#include <linux/module.h>
#include <linux/types.h>
-#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
@@ -176,178 +173,6 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
xdr->buflen += len;
}
-ssize_t
-xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
- skb_reader_t *desc,
- skb_read_actor_t copy_actor)
-{
- struct page **ppage = xdr->pages;
- unsigned int len, pglen = xdr->page_len;
- ssize_t copied = 0;
- int ret;
-
- len = xdr->head[0].iov_len;
- if (base < len) {
- len -= base;
- ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
- copied += ret;
- if (ret != len || !desc->count)
- goto out;
- base = 0;
- } else
- base -= len;
-
- if (pglen == 0)
- goto copy_tail;
- if (base >= pglen) {
- base -= pglen;
- goto copy_tail;
- }
- if (base || xdr->page_base) {
- pglen -= base;
- base += xdr->page_base;
- ppage += base >> PAGE_CACHE_SHIFT;
- base &= ~PAGE_CACHE_MASK;
- }
- do {
- char *kaddr;
-
- /* ACL likes to be lazy in allocating pages - ACLs
- * are small by default but can get huge. */
- if (unlikely(*ppage == NULL)) {
- *ppage = alloc_page(GFP_ATOMIC);
- if (unlikely(*ppage == NULL)) {
- if (copied == 0)
- copied = -ENOMEM;
- goto out;
- }
- }
-
- len = PAGE_CACHE_SIZE;
- kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
- if (base) {
- len -= base;
- if (pglen < len)
- len = pglen;
- ret = copy_actor(desc, kaddr + base, len);
- base = 0;
- } else {
- if (pglen < len)
- len = pglen;
- ret = copy_actor(desc, kaddr, len);
- }
- flush_dcache_page(*ppage);
- kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
- copied += ret;
- if (ret != len || !desc->count)
- goto out;
- ppage++;
- } while ((pglen -= len) != 0);
-copy_tail:
- len = xdr->tail[0].iov_len;
- if (base < len)
- copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
-out:
- return copied;
-}
-
-
-int
-xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
- struct xdr_buf *xdr, unsigned int base, int msgflags)
-{
- struct page **ppage = xdr->pages;
- unsigned int len, pglen = xdr->page_len;
- int err, ret = 0;
- ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
-
- len = xdr->head[0].iov_len;
- if (base < len || (addr != NULL && base == 0)) {
- struct kvec iov = {
- .iov_base = xdr->head[0].iov_base + base,
- .iov_len = len - base,
- };
- struct msghdr msg = {
- .msg_name = addr,
- .msg_namelen = addrlen,
- .msg_flags = msgflags,
- };
- if (xdr->len > len)
- msg.msg_flags |= MSG_MORE;
-
- if (iov.iov_len != 0)
- err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
- else
- err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
- if (ret == 0)
- ret = err;
- else if (err > 0)
- ret += err;
- if (err != iov.iov_len)
- goto out;
- base = 0;
- } else
- base -= len;
-
- if (pglen == 0)
- goto copy_tail;
- if (base >= pglen) {
- base -= pglen;
- goto copy_tail;
- }
- if (base || xdr->page_base) {
- pglen -= base;
- base += xdr->page_base;
- ppage += base >> PAGE_CACHE_SHIFT;
- base &= ~PAGE_CACHE_MASK;
- }
-
- sendpage = sock->ops->sendpage ? : sock_no_sendpage;
- do {
- int flags = msgflags;
-
- len = PAGE_CACHE_SIZE;
- if (base)
- len -= base;
- if (pglen < len)
- len = pglen;
-
- if (pglen != len || xdr->tail[0].iov_len != 0)
- flags |= MSG_MORE;
-
- /* Hmm... We might be dealing with highmem pages */
- if (PageHighMem(*ppage))
- sendpage = sock_no_sendpage;
- err = sendpage(sock, *ppage, base, len, flags);
- if (ret == 0)
- ret = err;
- else if (err > 0)
- ret += err;
- if (err != len)
- goto out;
- base = 0;
- ppage++;
- } while ((pglen -= len) != 0);
-copy_tail:
- len = xdr->tail[0].iov_len;
- if (base < len) {
- struct kvec iov = {
- .iov_base = xdr->tail[0].iov_base + base,
- .iov_len = len - base,
- };
- struct msghdr msg = {
- .msg_flags = msgflags,
- };
- err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
- if (ret == 0)
- ret = err;
- else if (err > 0)
- ret += err;
- }
-out:
- return ret;
-}
-
/*
* Helper routines for doing 'memmove' like operations on a struct xdr_buf
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c654e06b08..6dda3860351 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -10,12 +10,12 @@
* one is available. Otherwise, it sleeps on the backlog queue
* (xprt_reserve).
* - Next, the caller puts together the RPC message, stuffs it into
- * the request struct, and calls xprt_call().
- * - xprt_call transmits the message and installs the caller on the
- * socket's wait list. At the same time, it installs a timer that
+ * the request struct, and calls xprt_transmit().
+ * - xprt_transmit sends the message and installs the caller on the
+ * transport's wait list. At the same time, it installs a timer that
* is run after the packet's timeout has expired.
* - When a packet arrives, the data_ready handler walks the list of
- * pending requests for that socket. If a matching XID is found, the
+ * pending requests for that transport. If a matching XID is found, the
* caller is woken up, and the timer removed.
* - When no reply arrives within the timeout interval, the timer is
* fired by the kernel and runs xprt_timer(). It either adjusts the
@@ -33,36 +33,17 @@
*
* Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
*
- * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
- * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
- * TCP NFS related read + write fixes
- * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
- *
- * Rewrite of larges part of the code in order to stabilize TCP stuff.
- * Fix behaviour when socket buffer is full.
- * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
+ * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
*/
+#include <linux/module.h>
+
#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/net.h>
-#include <linux/mm.h>
-#include <linux/udp.h>
-#include <linux/tcp.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/file.h>
+#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/random.h>
-#include <net/sock.h>
-#include <net/checksum.h>
-#include <net/udp.h>
-#include <net/tcp.h>
+#include <linux/sunrpc/clnt.h>
/*
* Local variables
@@ -73,81 +54,90 @@
# define RPCDBG_FACILITY RPCDBG_XPRT
#endif
-#define XPRT_MAX_BACKOFF (8)
-#define XPRT_IDLE_TIMEOUT (5*60*HZ)
-#define XPRT_MAX_RESVPORT (800)
-
/*
* Local functions
*/
static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void do_xprt_reserve(struct rpc_task *);
-static void xprt_disconnect(struct rpc_xprt *);
static void xprt_connect_status(struct rpc_task *task);
-static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap,
- struct rpc_timeout *to);
-static struct socket *xprt_create_socket(struct rpc_xprt *, int, int);
-static void xprt_bind_socket(struct rpc_xprt *, struct socket *);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
-static int xprt_clear_backlog(struct rpc_xprt *xprt);
-
-#ifdef RPC_DEBUG_DATA
/*
- * Print the buffer contents (first 128 bytes only--just enough for
- * diropres return).
+ * The transport code maintains an estimate on the maximum number of out-
+ * standing RPC requests, using a smoothed version of the congestion
+ * avoidance implemented in 44BSD. This is basically the Van Jacobson
+ * congestion algorithm: If a retransmit occurs, the congestion window is
+ * halved; otherwise, it is incremented by 1/cwnd when
+ *
+ * - a reply is received and
+ * - a full number of requests are outstanding and
+ * - the congestion window hasn't been updated recently.
*/
-static void
-xprt_pktdump(char *msg, u32 *packet, unsigned int count)
-{
- u8 *buf = (u8 *) packet;
- int j;
-
- dprintk("RPC: %s\n", msg);
- for (j = 0; j < count && j < 128; j += 4) {
- if (!(j & 31)) {
- if (j)
- dprintk("\n");
- dprintk("0x%04x ", j);
- }
- dprintk("%02x%02x%02x%02x ",
- buf[j], buf[j+1], buf[j+2], buf[j+3]);
- }
- dprintk("\n");
-}
-#else
-static inline void
-xprt_pktdump(char *msg, u32 *packet, unsigned int count)
-{
- /* NOP */
-}
-#endif
+#define RPC_CWNDSHIFT (8U)
+#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT)
+#define RPC_INITCWND RPC_CWNDSCALE
+#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
-/*
- * Look up RPC transport given an INET socket
+#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
+
+/**
+ * xprt_reserve_xprt - serialize write access to transports
+ * @task: task that is requesting access to the transport
+ *
+ * This prevents mixing the payload of separate requests, and prevents
+ * transport connects from colliding with writes. No congestion control
+ * is provided.
*/
-static inline struct rpc_xprt *
-xprt_from_sock(struct sock *sk)
+int xprt_reserve_xprt(struct rpc_task *task)
{
- return (struct rpc_xprt *) sk->sk_user_data;
+ struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
+ if (task == xprt->snd_task)
+ return 1;
+ if (task == NULL)
+ return 0;
+ goto out_sleep;
+ }
+ xprt->snd_task = task;
+ if (req) {
+ req->rq_bytes_sent = 0;
+ req->rq_ntrans++;
+ }
+ return 1;
+
+out_sleep:
+ dprintk("RPC: %4d failed to lock transport %p\n",
+ task->tk_pid, xprt);
+ task->tk_timeout = 0;
+ task->tk_status = -EAGAIN;
+ if (req && req->rq_ntrans)
+ rpc_sleep_on(&xprt->resend, task, NULL, NULL);
+ else
+ rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+ return 0;
}
/*
- * Serialize write access to sockets, in order to prevent different
- * requests from interfering with each other.
- * Also prevents TCP socket connects from colliding with writes.
+ * xprt_reserve_xprt_cong - serialize write access to transports
+ * @task: task that is requesting access to the transport
+ *
+ * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
+ * integrated into the decision of whether a request is allowed to be
+ * woken up and given access to the transport.
*/
-static int
-__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
+int xprt_reserve_xprt_cong(struct rpc_task *task)
{
+ struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req = task->tk_rqstp;
- if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) {
+ if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
return 1;
goto out_sleep;
}
- if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+ if (__xprt_get_cong(xprt, task)) {
xprt->snd_task = task;
if (req) {
req->rq_bytes_sent = 0;
@@ -156,10 +146,10 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
return 1;
}
smp_mb__before_clear_bit();
- clear_bit(XPRT_LOCKED, &xprt->sockstate);
+ clear_bit(XPRT_LOCKED, &xprt->state);
smp_mb__after_clear_bit();
out_sleep:
- dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt);
+ dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
if (req && req->rq_ntrans)
@@ -169,26 +159,52 @@ out_sleep:
return 0;
}
-static inline int
-xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
+static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
int retval;
- spin_lock_bh(&xprt->sock_lock);
- retval = __xprt_lock_write(xprt, task);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
+ retval = xprt->ops->reserve_xprt(task);
+ spin_unlock_bh(&xprt->transport_lock);
return retval;
}
+static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+{
+ struct rpc_task *task;
+ struct rpc_rqst *req;
-static void
-__xprt_lock_write_next(struct rpc_xprt *xprt)
+ if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+ return;
+
+ task = rpc_wake_up_next(&xprt->resend);
+ if (!task) {
+ task = rpc_wake_up_next(&xprt->sending);
+ if (!task)
+ goto out_unlock;
+ }
+
+ req = task->tk_rqstp;
+ xprt->snd_task = task;
+ if (req) {
+ req->rq_bytes_sent = 0;
+ req->rq_ntrans++;
+ }
+ return;
+
+out_unlock:
+ smp_mb__before_clear_bit();
+ clear_bit(XPRT_LOCKED, &xprt->state);
+ smp_mb__after_clear_bit();
+}
+
+static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
struct rpc_task *task;
- if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
+ if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
return;
- if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
+ if (RPCXPRT_CONGESTED(xprt))
goto out_unlock;
task = rpc_wake_up_next(&xprt->resend);
if (!task) {
@@ -196,7 +212,7 @@ __xprt_lock_write_next(struct rpc_xprt *xprt)
if (!task)
goto out_unlock;
}
- if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+ if (__xprt_get_cong(xprt, task)) {
struct rpc_rqst *req = task->tk_rqstp;
xprt->snd_task = task;
if (req) {
@@ -207,87 +223,52 @@ __xprt_lock_write_next(struct rpc_xprt *xprt)
}
out_unlock:
smp_mb__before_clear_bit();
- clear_bit(XPRT_LOCKED, &xprt->sockstate);
+ clear_bit(XPRT_LOCKED, &xprt->state);
smp_mb__after_clear_bit();
}
-/*
- * Releases the socket for use by other requests.
+/**
+ * xprt_release_xprt - allow other requests to use a transport
+ * @xprt: transport with other tasks potentially waiting
+ * @task: task that is releasing access to the transport
+ *
+ * Note that "task" can be NULL. No congestion control is provided.
*/
-static void
-__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
xprt->snd_task = NULL;
smp_mb__before_clear_bit();
- clear_bit(XPRT_LOCKED, &xprt->sockstate);
+ clear_bit(XPRT_LOCKED, &xprt->state);
smp_mb__after_clear_bit();
__xprt_lock_write_next(xprt);
}
}
-static inline void
-xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
-{
- spin_lock_bh(&xprt->sock_lock);
- __xprt_release_write(xprt, task);
- spin_unlock_bh(&xprt->sock_lock);
-}
-
-/*
- * Write data to socket.
+/**
+ * xprt_release_xprt_cong - allow other requests to use a transport
+ * @xprt: transport with other tasks potentially waiting
+ * @task: task that is releasing access to the transport
+ *
+ * Note that "task" can be NULL. Another task is awoken to use the
+ * transport if the transport's congestion window allows it.
*/
-static inline int
-xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req)
+void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
- struct socket *sock = xprt->sock;
- struct xdr_buf *xdr = &req->rq_snd_buf;
- struct sockaddr *addr = NULL;
- int addrlen = 0;
- unsigned int skip;
- int result;
-
- if (!sock)
- return -ENOTCONN;
-
- xprt_pktdump("packet data:",
- req->rq_svec->iov_base,
- req->rq_svec->iov_len);
-
- /* For UDP, we need to provide an address */
- if (!xprt->stream) {
- addr = (struct sockaddr *) &xprt->addr;
- addrlen = sizeof(xprt->addr);
+ if (xprt->snd_task == task) {
+ xprt->snd_task = NULL;
+ smp_mb__before_clear_bit();
+ clear_bit(XPRT_LOCKED, &xprt->state);
+ smp_mb__after_clear_bit();
+ __xprt_lock_write_next_cong(xprt);
}
- /* Dont repeat bytes */
- skip = req->rq_bytes_sent;
-
- clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
- result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT);
-
- dprintk("RPC: xprt_sendmsg(%d) = %d\n", xdr->len - skip, result);
-
- if (result >= 0)
- return result;
+}
- switch (result) {
- case -ECONNREFUSED:
- /* When the server has died, an ICMP port unreachable message
- * prompts ECONNREFUSED.
- */
- case -EAGAIN:
- break;
- case -ECONNRESET:
- case -ENOTCONN:
- case -EPIPE:
- /* connection broken */
- if (xprt->stream)
- result = -ENOTCONN;
- break;
- default:
- printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result);
- }
- return result;
+static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ spin_lock_bh(&xprt->transport_lock);
+ xprt->ops->release_xprt(xprt, task);
+ spin_unlock_bh(&xprt->transport_lock);
}
/*
@@ -321,26 +302,40 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
return;
req->rq_cong = 0;
xprt->cong -= RPC_CWNDSCALE;
- __xprt_lock_write_next(xprt);
+ __xprt_lock_write_next_cong(xprt);
}
-/*
- * Adjust RPC congestion window
+/**
+ * xprt_release_rqst_cong - housekeeping when request is complete
+ * @task: RPC request that recently completed
+ *
+ * Useful for transports that require congestion control.
+ */
+void xprt_release_rqst_cong(struct rpc_task *task)
+{
+ __xprt_put_cong(task->tk_xprt, task->tk_rqstp);
+}
+
+/**
+ * xprt_adjust_cwnd - adjust transport congestion window
+ * @task: recently completed RPC request used to adjust window
+ * @result: result code of completed RPC request
+ *
* We use a time-smoothed congestion estimator to avoid heavy oscillation.
*/
-static void
-xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
+void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
- unsigned long cwnd;
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = task->tk_xprt;
+ unsigned long cwnd = xprt->cwnd;
- cwnd = xprt->cwnd;
if (result >= 0 && cwnd <= xprt->cong) {
/* The (cwnd >> 1) term makes sure
* the result gets rounded properly. */
cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
if (cwnd > RPC_MAXCWND(xprt))
cwnd = RPC_MAXCWND(xprt);
- __xprt_lock_write_next(xprt);
+ __xprt_lock_write_next_cong(xprt);
} else if (result == -ETIMEDOUT) {
cwnd >>= 1;
if (cwnd < RPC_CWNDSCALE)
@@ -349,11 +344,89 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
xprt->cong, xprt->cwnd, cwnd);
xprt->cwnd = cwnd;
+ __xprt_put_cong(xprt, req);
+}
+
+/**
+ * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
+ * @xprt: transport with waiting tasks
+ * @status: result code to plant in each task before waking it
+ *
+ */
+void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
+{
+ if (status < 0)
+ rpc_wake_up_status(&xprt->pending, status);
+ else
+ rpc_wake_up(&xprt->pending);
+}
+
+/**
+ * xprt_wait_for_buffer_space - wait for transport output buffer to clear
+ * @task: task to be put to sleep
+ *
+ */
+void xprt_wait_for_buffer_space(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ task->tk_timeout = req->rq_timeout;
+ rpc_sleep_on(&xprt->pending, task, NULL, NULL);
+}
+
+/**
+ * xprt_write_space - wake the task waiting for transport output buffer space
+ * @xprt: transport with waiting tasks
+ *
+ * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
+ */
+void xprt_write_space(struct rpc_xprt *xprt)
+{
+ if (unlikely(xprt->shutdown))
+ return;
+
+ spin_lock_bh(&xprt->transport_lock);
+ if (xprt->snd_task) {
+ dprintk("RPC: write space: waking waiting task on xprt %p\n",
+ xprt);
+ rpc_wake_up_task(xprt->snd_task);
+ }
+ spin_unlock_bh(&xprt->transport_lock);
+}
+
+/**
+ * xprt_set_retrans_timeout_def - set a request's retransmit timeout
+ * @task: task whose timeout is to be set
+ *
+ * Set a request's retransmit timeout based on the transport's
+ * default timeout parameters. Used by transports that don't adjust
+ * the retransmit timeout based on round-trip time estimation.
+ */
+void xprt_set_retrans_timeout_def(struct rpc_task *task)
+{
+ task->tk_timeout = task->tk_rqstp->rq_timeout;
}
/*
- * Reset the major timeout value
+ * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
+ * @task: task whose timeout is to be set
+ *
+ * Set a request's retransmit timeout using the RTT estimator.
*/
+void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
+{
+ int timer = task->tk_msg.rpc_proc->p_timer;
+ struct rpc_rtt *rtt = task->tk_client->cl_rtt;
+ struct rpc_rqst *req = task->tk_rqstp;
+ unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;
+
+ task->tk_timeout = rpc_calc_rto(rtt, timer);
+ task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
+ if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
+ task->tk_timeout = max_timeout;
+}
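
A small stand-alone model of the arithmetic above, using hypothetical tick values: the estimator's RTO is doubled once per recorded minor timeout and per retransmission, then clamped to the transport's maximum. rpc_calc_rto() and rpc_ntimeo() are represented simply by the numbers they would return.

#include <stdio.h>

static unsigned long model_rtt_timeout(unsigned long rto, unsigned int ntimeo,
				       unsigned int retries, unsigned long maxval)
{
	unsigned long timeout = rto << (ntimeo + retries);

	if (timeout > maxval || timeout == 0)
		timeout = maxval;
	return timeout;
}

int main(void)
{
	/* assumed numbers: 200-tick RTO estimate, 60000-tick maximum */
	printf("%lu\n", model_rtt_timeout(200, 0, 0, 60000));	/* 200 */
	printf("%lu\n", model_rtt_timeout(200, 1, 2, 60000));	/* 1600 */
	printf("%lu\n", model_rtt_timeout(200, 4, 6, 60000));	/* clamped to 60000 */
	return 0;
}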
+
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
struct rpc_timeout *to = &req->rq_xprt->timeout;
@@ -368,8 +441,10 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req)
req->rq_majortimeo += jiffies;
}
-/*
- * Adjust timeout values etc for next retransmit
+/**
+ * xprt_adjust_timeout - adjust timeout values for next retransmit
+ * @req: RPC request containing parameters to use for the adjustment
+ *
*/
int xprt_adjust_timeout(struct rpc_rqst *req)
{
@@ -391,9 +466,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
req->rq_retries = 0;
xprt_reset_majortimeo(req);
/* Reset the RTT counters == "slow start" */
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
pprintk("RPC: %lu timeout\n", jiffies);
status = -ETIMEDOUT;
}
@@ -405,133 +480,52 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
return status;
}
-/*
- * Close down a transport socket
- */
-static void
-xprt_close(struct rpc_xprt *xprt)
-{
- struct socket *sock = xprt->sock;
- struct sock *sk = xprt->inet;
-
- if (!sk)
- return;
-
- write_lock_bh(&sk->sk_callback_lock);
- xprt->inet = NULL;
- xprt->sock = NULL;
-
- sk->sk_user_data = NULL;
- sk->sk_data_ready = xprt->old_data_ready;
- sk->sk_state_change = xprt->old_state_change;
- sk->sk_write_space = xprt->old_write_space;
- write_unlock_bh(&sk->sk_callback_lock);
-
- sk->sk_no_check = 0;
-
- sock_release(sock);
-}
-
-static void
-xprt_socket_autoclose(void *args)
+static void xprt_autoclose(void *args)
{
struct rpc_xprt *xprt = (struct rpc_xprt *)args;
xprt_disconnect(xprt);
- xprt_close(xprt);
+ xprt->ops->close(xprt);
xprt_release_write(xprt, NULL);
}
-/*
- * Mark a transport as disconnected
+/**
+ * xprt_disconnect - mark a transport as disconnected
+ * @xprt: transport to flag for disconnect
+ *
*/
-static void
-xprt_disconnect(struct rpc_xprt *xprt)
+void xprt_disconnect(struct rpc_xprt *xprt)
{
dprintk("RPC: disconnected transport %p\n", xprt);
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
xprt_clear_connected(xprt);
- rpc_wake_up_status(&xprt->pending, -ENOTCONN);
- spin_unlock_bh(&xprt->sock_lock);
+ xprt_wake_pending_tasks(xprt, -ENOTCONN);
+ spin_unlock_bh(&xprt->transport_lock);
}
-/*
- * Used to allow disconnection when we've been idle
- */
static void
xprt_init_autodisconnect(unsigned long data)
{
struct rpc_xprt *xprt = (struct rpc_xprt *)data;
- spin_lock(&xprt->sock_lock);
+ spin_lock(&xprt->transport_lock);
if (!list_empty(&xprt->recv) || xprt->shutdown)
goto out_abort;
- if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
+ if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
goto out_abort;
- spin_unlock(&xprt->sock_lock);
- /* Let keventd close the socket */
- if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0)
+ spin_unlock(&xprt->transport_lock);
+ if (xprt_connecting(xprt))
xprt_release_write(xprt, NULL);
else
schedule_work(&xprt->task_cleanup);
return;
out_abort:
- spin_unlock(&xprt->sock_lock);
-}
-
-static void xprt_socket_connect(void *args)
-{
- struct rpc_xprt *xprt = (struct rpc_xprt *)args;
- struct socket *sock = xprt->sock;
- int status = -EIO;
-
- if (xprt->shutdown || xprt->addr.sin_port == 0)
- goto out;
-
- /*
- * Start by resetting any existing state
- */
- xprt_close(xprt);
- sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport);
- if (sock == NULL) {
- /* couldn't create socket or bind to reserved port;
- * this is likely a permanent error, so cause an abort */
- goto out;
- }
- xprt_bind_socket(xprt, sock);
- xprt_sock_setbufsize(xprt);
-
- status = 0;
- if (!xprt->stream)
- goto out;
-
- /*
- * Tell the socket layer to start connecting...
- */
- status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
- sizeof(xprt->addr), O_NONBLOCK);
- dprintk("RPC: %p connect status %d connected %d sock state %d\n",
- xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
- if (status < 0) {
- switch (status) {
- case -EINPROGRESS:
- case -EALREADY:
- goto out_clear;
- }
- }
-out:
- if (status < 0)
- rpc_wake_up_status(&xprt->pending, status);
- else
- rpc_wake_up(&xprt->pending);
-out_clear:
- smp_mb__before_clear_bit();
- clear_bit(XPRT_CONNECTING, &xprt->sockstate);
- smp_mb__after_clear_bit();
+ spin_unlock(&xprt->transport_lock);
}
-/*
- * Attempt to connect a TCP socket.
+/**
+ * xprt_connect - schedule a transport connect operation
+ * @task: RPC task that is requesting the connect
*
*/
void xprt_connect(struct rpc_task *task)
@@ -552,37 +546,19 @@ void xprt_connect(struct rpc_task *task)
if (!xprt_lock_write(xprt, task))
return;
if (xprt_connected(xprt))
- goto out_write;
+ xprt_release_write(xprt, task);
+ else {
+ if (task->tk_rqstp)
+ task->tk_rqstp->rq_bytes_sent = 0;
- if (task->tk_rqstp)
- task->tk_rqstp->rq_bytes_sent = 0;
-
- task->tk_timeout = RPC_CONNECT_TIMEOUT;
- rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
- if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) {
- /* Note: if we are here due to a dropped connection
- * we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ
- * seconds
- */
- if (xprt->sock != NULL)
- schedule_delayed_work(&xprt->sock_connect,
- RPC_REESTABLISH_TIMEOUT);
- else {
- schedule_work(&xprt->sock_connect);
- if (!RPC_IS_ASYNC(task))
- flush_scheduled_work();
- }
+ task->tk_timeout = xprt->connect_timeout;
+ rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
+ xprt->ops->connect(task);
}
return;
- out_write:
- xprt_release_write(xprt, task);
}
-/*
- * We arrive here when awoken from waiting on connection establishment.
- */
-static void
-xprt_connect_status(struct rpc_task *task)
+static void xprt_connect_status(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
@@ -592,31 +568,42 @@ xprt_connect_status(struct rpc_task *task)
return;
}
- /* if soft mounted, just cause this RPC to fail */
- if (RPC_IS_SOFT(task))
- task->tk_status = -EIO;
-
switch (task->tk_status) {
case -ECONNREFUSED:
case -ECONNRESET:
+ dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
+ task->tk_pid, task->tk_client->cl_server);
+ break;
case -ENOTCONN:
- return;
+ dprintk("RPC: %4d xprt_connect_status: connection broken\n",
+ task->tk_pid);
+ break;
case -ETIMEDOUT:
- dprintk("RPC: %4d xprt_connect_status: timed out\n",
+ dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
task->tk_pid);
break;
default:
- printk(KERN_ERR "RPC: error %d connecting to server %s\n",
- -task->tk_status, task->tk_client->cl_server);
+ dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
+ task->tk_pid, -task->tk_status, task->tk_client->cl_server);
+ xprt_release_write(xprt, task);
+ task->tk_status = -EIO;
+ return;
+ }
+
+ /* if soft mounted, just cause this RPC to fail */
+ if (RPC_IS_SOFT(task)) {
+ xprt_release_write(xprt, task);
+ task->tk_status = -EIO;
}
- xprt_release_write(xprt, task);
}
-/*
- * Look up the RPC request corresponding to a reply, and then lock it.
+/**
+ * xprt_lookup_rqst - find an RPC request corresponding to an XID
+ * @xprt: transport on which the original request was transmitted
+ * @xid: RPC XID of incoming reply
+ *
*/
-static inline struct rpc_rqst *
-xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
+struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
{
struct list_head *pos;
struct rpc_rqst *req = NULL;
@@ -631,556 +618,68 @@ xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
return req;
}
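
As a rough illustration of what this helper does (the loop body falls between hunks here): the reply's XID is matched against the requests queued on the transport's receive list. The types below are simplified stand-ins, not the kernel structures.

#include <stdio.h>
#include <stdint.h>

struct model_rqst {
	uint32_t xid;
	struct model_rqst *next;
};

/* Walk the list of requests awaiting a reply, matching on the XID
 * carried in the incoming reply header. */
static struct model_rqst *model_lookup_rqst(struct model_rqst *recv, uint32_t xid)
{
	struct model_rqst *req;

	for (req = recv; req != NULL; req = req->next)
		if (req->xid == xid)
			return req;
	return NULL;
}

int main(void)
{
	struct model_rqst c = { 0x1003, NULL };
	struct model_rqst b = { 0x1002, &c };
	struct model_rqst a = { 0x1001, &b };

	printf("xid 0x1002 %s\n",
	       model_lookup_rqst(&a, 0x1002) ? "found" : "not found");
	return 0;
}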
-/*
- * Complete reply received.
- * The TCP code relies on us to remove the request from xprt->pending.
- */
-static void
-xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
-{
- struct rpc_task *task = req->rq_task;
- struct rpc_clnt *clnt = task->tk_client;
-
- /* Adjust congestion window */
- if (!xprt->nocong) {
- unsigned timer = task->tk_msg.rpc_proc->p_timer;
- xprt_adjust_cwnd(xprt, copied);
- __xprt_put_cong(xprt, req);
- if (timer) {
- if (req->rq_ntrans == 1)
- rpc_update_rtt(clnt->cl_rtt, timer,
- (long)jiffies - req->rq_xtime);
- rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1);
- }
- }
-
-#ifdef RPC_PROFILE
- /* Profile only reads for now */
- if (copied > 1024) {
- static unsigned long nextstat;
- static unsigned long pkt_rtt, pkt_len, pkt_cnt;
-
- pkt_cnt++;
- pkt_len += req->rq_slen + copied;
- pkt_rtt += jiffies - req->rq_xtime;
- if (time_before(nextstat, jiffies)) {
- printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd);
- printk("RPC: %ld %ld %ld %ld stat\n",
- jiffies, pkt_cnt, pkt_len, pkt_rtt);
- pkt_rtt = pkt_len = pkt_cnt = 0;
- nextstat = jiffies + 5 * HZ;
- }
- }
-#endif
-
- dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied);
- list_del_init(&req->rq_list);
- req->rq_received = req->rq_private_buf.len = copied;
-
- /* ... and wake up the process. */
- rpc_wake_up_task(task);
- return;
-}
-
-static size_t
-skb_read_bits(skb_reader_t *desc, void *to, size_t len)
-{
- if (len > desc->count)
- len = desc->count;
- if (skb_copy_bits(desc->skb, desc->offset, to, len))
- return 0;
- desc->count -= len;
- desc->offset += len;
- return len;
-}
-
-static size_t
-skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
-{
- unsigned int csum2, pos;
-
- if (len > desc->count)
- len = desc->count;
- pos = desc->offset;
- csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
- desc->csum = csum_block_add(desc->csum, csum2, pos);
- desc->count -= len;
- desc->offset += len;
- return len;
-}
-
-/*
- * We have set things up such that we perform the checksum of the UDP
- * packet in parallel with the copies into the RPC client iovec. -DaveM
- */
-int
-csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
-{
- skb_reader_t desc;
-
- desc.skb = skb;
- desc.offset = sizeof(struct udphdr);
- desc.count = skb->len - desc.offset;
-
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- goto no_checksum;
-
- desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
- if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
- return -1;
- if (desc.offset != skb->len) {
- unsigned int csum2;
- csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
- desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
- }
- if (desc.count)
- return -1;
- if ((unsigned short)csum_fold(desc.csum))
- return -1;
- return 0;
-no_checksum:
- if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
- return -1;
- if (desc.count)
- return -1;
- return 0;
-}
-
-/*
- * Input handler for RPC replies. Called from a bottom half and hence
- * atomic.
- */
-static void
-udp_data_ready(struct sock *sk, int len)
-{
- struct rpc_task *task;
- struct rpc_xprt *xprt;
- struct rpc_rqst *rovr;
- struct sk_buff *skb;
- int err, repsize, copied;
- u32 _xid, *xp;
-
- read_lock(&sk->sk_callback_lock);
- dprintk("RPC: udp_data_ready...\n");
- if (!(xprt = xprt_from_sock(sk))) {
- printk("RPC: udp_data_ready request not found!\n");
- goto out;
- }
-
- dprintk("RPC: udp_data_ready client %p\n", xprt);
-
- if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
- goto out;
-
- if (xprt->shutdown)
- goto dropit;
-
- repsize = skb->len - sizeof(struct udphdr);
- if (repsize < 4) {
- printk("RPC: impossible RPC reply size %d!\n", repsize);
- goto dropit;
- }
-
- /* Copy the XID from the skb... */
- xp = skb_header_pointer(skb, sizeof(struct udphdr),
- sizeof(_xid), &_xid);
- if (xp == NULL)
- goto dropit;
-
- /* Look up and lock the request corresponding to the given XID */
- spin_lock(&xprt->sock_lock);
- rovr = xprt_lookup_rqst(xprt, *xp);
- if (!rovr)
- goto out_unlock;
- task = rovr->rq_task;
-
- dprintk("RPC: %4d received reply\n", task->tk_pid);
-
- if ((copied = rovr->rq_private_buf.buflen) > repsize)
- copied = repsize;
-
- /* Suck it into the iovec, verify checksum if not done by hw. */
- if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
- goto out_unlock;
-
- /* Something worked... */
- dst_confirm(skb->dst);
-
- xprt_complete_rqst(xprt, rovr, copied);
-
- out_unlock:
- spin_unlock(&xprt->sock_lock);
- dropit:
- skb_free_datagram(sk, skb);
- out:
- read_unlock(&sk->sk_callback_lock);
-}
-
-/*
- * Copy from an skb into memory and shrink the skb.
- */
-static inline size_t
-tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
-{
- if (len > desc->count)
- len = desc->count;
- if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
- dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
- len, desc->count);
- return 0;
- }
- desc->offset += len;
- desc->count -= len;
- dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
- len, desc->count);
- return len;
-}
-
-/*
- * TCP read fragment marker
- */
-static inline void
-tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
- size_t len, used;
- char *p;
-
- p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
- len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
- used = tcp_copy_data(desc, p, len);
- xprt->tcp_offset += used;
- if (used != len)
- return;
- xprt->tcp_reclen = ntohl(xprt->tcp_recm);
- if (xprt->tcp_reclen & 0x80000000)
- xprt->tcp_flags |= XPRT_LAST_FRAG;
- else
- xprt->tcp_flags &= ~XPRT_LAST_FRAG;
- xprt->tcp_reclen &= 0x7fffffff;
- xprt->tcp_flags &= ~XPRT_COPY_RECM;
- xprt->tcp_offset = 0;
- /* Sanity check of the record length */
- if (xprt->tcp_reclen < 4) {
- printk(KERN_ERR "RPC: Invalid TCP record fragment length\n");
- xprt_disconnect(xprt);
- }
- dprintk("RPC: reading TCP record fragment of length %d\n",
- xprt->tcp_reclen);
-}
-
-static void
-tcp_check_recm(struct rpc_xprt *xprt)
-{
- dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
- xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
- if (xprt->tcp_offset == xprt->tcp_reclen) {
- xprt->tcp_flags |= XPRT_COPY_RECM;
- xprt->tcp_offset = 0;
- if (xprt->tcp_flags & XPRT_LAST_FRAG) {
- xprt->tcp_flags &= ~XPRT_COPY_DATA;
- xprt->tcp_flags |= XPRT_COPY_XID;
- xprt->tcp_copied = 0;
- }
- }
-}
-
-/*
- * TCP read xid
- */
-static inline void
-tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
- size_t len, used;
- char *p;
-
- len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
- dprintk("RPC: reading XID (%Zu bytes)\n", len);
- p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
- used = tcp_copy_data(desc, p, len);
- xprt->tcp_offset += used;
- if (used != len)
- return;
- xprt->tcp_flags &= ~XPRT_COPY_XID;
- xprt->tcp_flags |= XPRT_COPY_DATA;
- xprt->tcp_copied = 4;
- dprintk("RPC: reading reply for XID %08x\n",
- ntohl(xprt->tcp_xid));
- tcp_check_recm(xprt);
-}
-
-/*
- * TCP read and complete request
- */
-static inline void
-tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
- struct rpc_rqst *req;
- struct xdr_buf *rcvbuf;
- size_t len;
- ssize_t r;
-
- /* Find and lock the request corresponding to this xid */
- spin_lock(&xprt->sock_lock);
- req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
- if (!req) {
- xprt->tcp_flags &= ~XPRT_COPY_DATA;
- dprintk("RPC: XID %08x request not found!\n",
- ntohl(xprt->tcp_xid));
- spin_unlock(&xprt->sock_lock);
- return;
- }
-
- rcvbuf = &req->rq_private_buf;
- len = desc->count;
- if (len > xprt->tcp_reclen - xprt->tcp_offset) {
- skb_reader_t my_desc;
-
- len = xprt->tcp_reclen - xprt->tcp_offset;
- memcpy(&my_desc, desc, sizeof(my_desc));
- my_desc.count = len;
- r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
- &my_desc, tcp_copy_data);
- desc->count -= r;
- desc->offset += r;
- } else
- r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
- desc, tcp_copy_data);
-
- if (r > 0) {
- xprt->tcp_copied += r;
- xprt->tcp_offset += r;
- }
- if (r != len) {
- /* Error when copying to the receive buffer,
- * usually because we weren't able to allocate
- * additional buffer pages. All we can do now
- * is turn off XPRT_COPY_DATA, so the request
- * will not receive any additional updates,
- * and time out.
- * Any remaining data from this record will
- * be discarded.
- */
- xprt->tcp_flags &= ~XPRT_COPY_DATA;
- dprintk("RPC: XID %08x truncated request\n",
- ntohl(xprt->tcp_xid));
- dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
- xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
- goto out;
- }
-
- dprintk("RPC: XID %08x read %Zd bytes\n",
- ntohl(xprt->tcp_xid), r);
- dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
- xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
-
- if (xprt->tcp_copied == req->rq_private_buf.buflen)
- xprt->tcp_flags &= ~XPRT_COPY_DATA;
- else if (xprt->tcp_offset == xprt->tcp_reclen) {
- if (xprt->tcp_flags & XPRT_LAST_FRAG)
- xprt->tcp_flags &= ~XPRT_COPY_DATA;
- }
-
-out:
- if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
- dprintk("RPC: %4d received reply complete\n",
- req->rq_task->tk_pid);
- xprt_complete_rqst(xprt, req, xprt->tcp_copied);
- }
- spin_unlock(&xprt->sock_lock);
- tcp_check_recm(xprt);
-}
-
-/*
- * TCP discard extra bytes from a short read
- */
-static inline void
-tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
- size_t len;
-
- len = xprt->tcp_reclen - xprt->tcp_offset;
- if (len > desc->count)
- len = desc->count;
- desc->count -= len;
- desc->offset += len;
- xprt->tcp_offset += len;
- dprintk("RPC: discarded %Zu bytes\n", len);
- tcp_check_recm(xprt);
-}
-
-/*
- * TCP record receive routine
- * We first have to grab the record marker, then the XID, then the data.
+/**
+ * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
+ * @task: RPC request that recently completed
+ *
*/
-static int
-tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
- unsigned int offset, size_t len)
-{
- struct rpc_xprt *xprt = rd_desc->arg.data;
- skb_reader_t desc = {
- .skb = skb,
- .offset = offset,
- .count = len,
- .csum = 0
- };
-
- dprintk("RPC: tcp_data_recv\n");
- do {
- /* Read in a new fragment marker if necessary */
- /* Can we ever really expect to get completely empty fragments? */
- if (xprt->tcp_flags & XPRT_COPY_RECM) {
- tcp_read_fraghdr(xprt, &desc);
- continue;
- }
- /* Read in the xid if necessary */
- if (xprt->tcp_flags & XPRT_COPY_XID) {
- tcp_read_xid(xprt, &desc);
- continue;
- }
- /* Read in the request data */
- if (xprt->tcp_flags & XPRT_COPY_DATA) {
- tcp_read_request(xprt, &desc);
- continue;
- }
- /* Skip over any trailing bytes on short reads */
- tcp_read_discard(xprt, &desc);
- } while (desc.count);
- dprintk("RPC: tcp_data_recv done\n");
- return len - desc.count;
-}
-
-static void tcp_data_ready(struct sock *sk, int bytes)
+void xprt_update_rtt(struct rpc_task *task)
{
- struct rpc_xprt *xprt;
- read_descriptor_t rd_desc;
-
- read_lock(&sk->sk_callback_lock);
- dprintk("RPC: tcp_data_ready...\n");
- if (!(xprt = xprt_from_sock(sk))) {
- printk("RPC: tcp_data_ready socket info not found!\n");
- goto out;
- }
- if (xprt->shutdown)
- goto out;
-
- /* We use rd_desc to pass struct xprt to tcp_data_recv */
- rd_desc.arg.data = xprt;
- rd_desc.count = 65536;
- tcp_read_sock(sk, &rd_desc, tcp_data_recv);
-out:
- read_unlock(&sk->sk_callback_lock);
-}
-
-static void
-tcp_state_change(struct sock *sk)
-{
- struct rpc_xprt *xprt;
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_rtt *rtt = task->tk_client->cl_rtt;
+ unsigned timer = task->tk_msg.rpc_proc->p_timer;
- read_lock(&sk->sk_callback_lock);
- if (!(xprt = xprt_from_sock(sk)))
- goto out;
- dprintk("RPC: tcp_state_change client %p...\n", xprt);
- dprintk("RPC: state %x conn %d dead %d zapped %d\n",
- sk->sk_state, xprt_connected(xprt),
- sock_flag(sk, SOCK_DEAD),
- sock_flag(sk, SOCK_ZAPPED));
-
- switch (sk->sk_state) {
- case TCP_ESTABLISHED:
- spin_lock_bh(&xprt->sock_lock);
- if (!xprt_test_and_set_connected(xprt)) {
- /* Reset TCP record info */
- xprt->tcp_offset = 0;
- xprt->tcp_reclen = 0;
- xprt->tcp_copied = 0;
- xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
- rpc_wake_up(&xprt->pending);
- }
- spin_unlock_bh(&xprt->sock_lock);
- break;
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- break;
- default:
- xprt_disconnect(xprt);
- break;
+ if (timer) {
+ if (req->rq_ntrans == 1)
+ rpc_update_rtt(rtt, timer,
+ (long)jiffies - req->rq_xtime);
+ rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
}
- out:
- read_unlock(&sk->sk_callback_lock);
}

-/*
- * Called when more output buffer space is available for this socket.
- * We try not to wake our writers until they can make "significant"
- * progress, otherwise we'll waste resources thrashing sock_sendmsg
- * with a bunch of small requests.
+/**
+ * xprt_complete_rqst - called when reply processing is complete
+ * @task: RPC request that recently completed
+ * @copied: actual number of bytes received from the transport
+ *
+ * Caller holds transport lock.
*/
-static void
-xprt_write_space(struct sock *sk)
+void xprt_complete_rqst(struct rpc_task *task, int copied)
{
- struct rpc_xprt *xprt;
- struct socket *sock;
-
- read_lock(&sk->sk_callback_lock);
- if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket))
- goto out;
- if (xprt->shutdown)
- goto out;
-
- /* Wait until we have enough socket memory */
- if (xprt->stream) {
- /* from net/core/stream.c:sk_stream_write_space */
- if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))
- goto out;
- } else {
- /* from net/core/sock.c:sock_def_write_space */
- if (!sock_writeable(sk))
- goto out;
- }
+ struct rpc_rqst *req = task->tk_rqstp;
- if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
- goto out;
+ dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
+ task->tk_pid, ntohl(req->rq_xid), copied);
- spin_lock_bh(&xprt->sock_lock);
- if (xprt->snd_task)
- rpc_wake_up_task(xprt->snd_task);
- spin_unlock_bh(&xprt->sock_lock);
-out:
- read_unlock(&sk->sk_callback_lock);
+ list_del_init(&req->rq_list);
+ req->rq_received = req->rq_private_buf.len = copied;
+ rpc_wake_up_task(task);
}
-/*
- * RPC receive timeout handler.
- */
-static void
-xprt_timer(struct rpc_task *task)
+static void xprt_timer(struct rpc_task *task)
{
- struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- spin_lock(&xprt->sock_lock);
- if (req->rq_received)
- goto out;
-
- xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
- __xprt_put_cong(xprt, req);
+ dprintk("RPC: %4d xprt_timer\n", task->tk_pid);
- dprintk("RPC: %4d xprt_timer (%s request)\n",
- task->tk_pid, req ? "pending" : "backlogged");
-
- task->tk_status = -ETIMEDOUT;
-out:
+ spin_lock(&xprt->transport_lock);
+ if (!req->rq_received) {
+ if (xprt->ops->timer)
+ xprt->ops->timer(task);
+ task->tk_status = -ETIMEDOUT;
+ }
task->tk_timeout = 0;
rpc_wake_up_task(task);
- spin_unlock(&xprt->sock_lock);
+ spin_unlock(&xprt->transport_lock);
}
-/*
- * Place the actual RPC call.
- * We have to copy the iovec because sendmsg fiddles with its contents.
+/**
+ * xprt_prepare_transmit - reserve the transport before sending a request
+ * @task: RPC task about to send a request
+ *
*/
-int
-xprt_prepare_transmit(struct rpc_task *task)
+int xprt_prepare_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
@@ -1191,12 +690,12 @@ xprt_prepare_transmit(struct rpc_task *task)
if (xprt->shutdown)
return -EIO;
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
if (req->rq_received && !req->rq_bytes_sent) {
err = req->rq_received;
goto out_unlock;
}
- if (!__xprt_lock_write(xprt, task)) {
+ if (!xprt->ops->reserve_xprt(task)) {
err = -EAGAIN;
goto out_unlock;
}
@@ -1206,39 +705,42 @@ xprt_prepare_transmit(struct rpc_task *task)
goto out_unlock;
}
out_unlock:
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
return err;
}
void
-xprt_transmit(struct rpc_task *task)
+xprt_abort_transmit(struct rpc_task *task)
+{
+ struct rpc_xprt *xprt = task->tk_xprt;
+
+ xprt_release_write(xprt, task);
+}
+
+/**
+ * xprt_transmit - send an RPC request on a transport
+ * @task: controlling RPC task
+ *
+ * We have to copy the iovec because sendmsg fiddles with its contents.
+ */
+void xprt_transmit(struct rpc_task *task)
{
- struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- int status, retry = 0;
-
+ int status;
dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
- /* set up everything as needed. */
- /* Write the record marker */
- if (xprt->stream) {
- u32 *marker = req->rq_svec[0].iov_base;
-
- *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
- }
-
smp_rmb();
if (!req->rq_received) {
if (list_empty(&req->rq_list)) {
- spin_lock_bh(&xprt->sock_lock);
+ spin_lock_bh(&xprt->transport_lock);
/* Update the softirq receive buffer */
memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
sizeof(req->rq_private_buf));
/* Add request to the receive list */
list_add_tail(&req->rq_list, &xprt->recv);
- spin_unlock_bh(&xprt->sock_lock);
+ spin_unlock_bh(&xprt->transport_lock);
xprt_reset_majortimeo(req);
/* Turn off autodisconnect */
del_singleshot_timer_sync(&xprt->timer);
@@ -1246,40 +748,19 @@ xprt_transmit(struct rpc_task *task)
} else if (!req->rq_bytes_sent)
return;
- /* Continue transmitting the packet/record. We must be careful
- * to cope with writespace callbacks arriving _after_ we have
- * called xprt_sendmsg().
- */
- while (1) {
- req->rq_xtime = jiffies;
- status = xprt_sendmsg(xprt, req);
-
- if (status < 0)
- break;
-
- if (xprt->stream) {
- req->rq_bytes_sent += status;
-
- /* If we've sent the entire packet, immediately
- * reset the count of bytes sent. */
- if (req->rq_bytes_sent >= req->rq_slen) {
- req->rq_bytes_sent = 0;
- goto out_receive;
- }
- } else {
- if (status >= req->rq_slen)
- goto out_receive;
- status = -EAGAIN;
- break;
- }
-
- dprintk("RPC: %4d xmit incomplete (%d left of %d)\n",
- task->tk_pid, req->rq_slen - req->rq_bytes_sent,
- req->rq_slen);
-
- status = -EAGAIN;
- if (retry++ > 50)
- break;
+ status = xprt->ops->send_request(task);
+ if (status == 0) {
+ dprintk("RPC: %4d xmit complete\n", task->tk_pid);
+ spin_lock_bh(&xprt->transport_lock);
+ xprt->ops->set_retrans_timeout(task);
+ /* Don't race with disconnect */
+ if (!xprt_connected(xprt))
+ task->tk_status = -ENOTCONN;
+ else if (!req->rq_received)
+ rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
+ xprt->ops->release_xprt(xprt, task);
+ spin_unlock_bh(&xprt->transport_lock);
+ return;
}
/* Note: at this point, task->tk_sleeping has not yet been set,
@@ -1289,60 +770,19 @@ xprt_transmit(struct rpc_task *task)
task->tk_status = status;
switch (status) {
- case -EAGAIN:
- if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
- /* Protect against races with xprt_write_space */
- spin_lock_bh(&xprt->sock_lock);
- /* Don't race with disconnect */
- if (!xprt_connected(xprt))
- task->tk_status = -ENOTCONN;
- else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) {
- task->tk_timeout = req->rq_timeout;
- rpc_sleep_on(&xprt->pending, task, NULL, NULL);
- }
- spin_unlock_bh(&xprt->sock_lock);
- return;
- }
- /* Keep holding the socket if it is blocked */
- rpc_delay(task, HZ>>4);
- return;
case -ECONNREFUSED:
- task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
rpc_sleep_on(&xprt->sending, task, NULL, NULL);
+ case -EAGAIN:
case -ENOTCONN:
return;
default:
- if (xprt->stream)
- xprt_disconnect(xprt);
+ break;
}
xprt_release_write(xprt, task);
return;
- out_receive:
- dprintk("RPC: %4d xmit complete\n", task->tk_pid);
- /* Set the task's receive timeout value */
- spin_lock_bh(&xprt->sock_lock);
- if (!xprt->nocong) {
- int timer = task->tk_msg.rpc_proc->p_timer;
- task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer);
- task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries;
- if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0)
- task->tk_timeout = xprt->timeout.to_maxval;
- } else
- task->tk_timeout = req->rq_timeout;
- /* Don't race with disconnect */
- if (!xprt_connected(xprt))
- task->tk_status = -ENOTCONN;
- else if (!req->rq_received)
- rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
- __xprt_release_write(xprt, task);
- spin_unlock_bh(&xprt->sock_lock);
}
-/*
- * Reserve an RPC call slot.
- */
-static inline void
-do_xprt_reserve(struct rpc_task *task)
+static inline void do_xprt_reserve(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
@@ -1362,22 +802,25 @@ do_xprt_reserve(struct rpc_task *task)
rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
-void
-xprt_reserve(struct rpc_task *task)
+/**
+ * xprt_reserve - allocate an RPC request slot
+ * @task: RPC task requesting a slot allocation
+ *
+ * If no more slots are available, place the task on the transport's
+ * backlog queue.
+ */
+void xprt_reserve(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
task->tk_status = -EIO;
if (!xprt->shutdown) {
- spin_lock(&xprt->xprt_lock);
+ spin_lock(&xprt->reserve_lock);
do_xprt_reserve(task);
- spin_unlock(&xprt->xprt_lock);
+ spin_unlock(&xprt->reserve_lock);
}
}
-/*
- * Allocate a 'unique' XID
- */
static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
return xprt->xid++;
@@ -1388,11 +831,7 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
get_random_bytes(&xprt->xid, sizeof(xprt->xid));
}
-/*
- * Initialize RPC request
- */
-static void
-xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
+static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
struct rpc_rqst *req = task->tk_rqstp;
@@ -1400,128 +839,104 @@ xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
req->rq_task = task;
req->rq_xprt = xprt;
req->rq_xid = xprt_alloc_xid(xprt);
+ req->rq_release_snd_buf = NULL;
dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
req, ntohl(req->rq_xid));
}
-/*
- * Release an RPC call slot
+/**
+ * xprt_release - release an RPC request slot
+ * @task: task which is finished with the slot
+ *
*/
-void
-xprt_release(struct rpc_task *task)
+void xprt_release(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req;
if (!(req = task->tk_rqstp))
return;
- spin_lock_bh(&xprt->sock_lock);
- __xprt_release_write(xprt, task);
- __xprt_put_cong(xprt, req);
+ spin_lock_bh(&xprt->transport_lock);
+ xprt->ops->release_xprt(xprt, task);
+ if (xprt->ops->release_request)
+ xprt->ops->release_request(task);
if (!list_empty(&req->rq_list))
list_del(&req->rq_list);
xprt->last_used = jiffies;
if (list_empty(&xprt->recv) && !xprt->shutdown)
- mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT);
- spin_unlock_bh(&xprt->sock_lock);
+ mod_timer(&xprt->timer,
+ xprt->last_used + xprt->idle_timeout);
+ spin_unlock_bh(&xprt->transport_lock);
task->tk_rqstp = NULL;
+ if (req->rq_release_snd_buf)
+ req->rq_release_snd_buf(req);
memset(req, 0, sizeof(*req)); /* mark unused */
dprintk("RPC: %4d release request %p\n", task->tk_pid, req);
- spin_lock(&xprt->xprt_lock);
+ spin_lock(&xprt->reserve_lock);
list_add(&req->rq_list, &xprt->free);
- xprt_clear_backlog(xprt);
- spin_unlock(&xprt->xprt_lock);
-}
-
-/*
- * Set default timeout parameters
- */
-static void
-xprt_default_timeout(struct rpc_timeout *to, int proto)
-{
- if (proto == IPPROTO_UDP)
- xprt_set_timeout(to, 5, 5 * HZ);
- else
- xprt_set_timeout(to, 5, 60 * HZ);
+ rpc_wake_up_next(&xprt->backlog);
+ spin_unlock(&xprt->reserve_lock);
}
-/*
- * Set constant timeout
+/**
+ * xprt_set_timeout - set constant RPC timeout
+ * @to: RPC timeout parameters to set up
+ * @retr: number of retries
+ * @incr: amount of increase after each retry
+ *
*/
-void
-xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
+void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
to->to_initval =
to->to_increment = incr;
- to->to_maxval = incr * retr;
+ to->to_maxval = to->to_initval + (incr * retr);
to->to_retries = retr;
to->to_exponential = 0;
}
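
A user-space sketch of the timeout series these parameters produce, assuming HZ = 1000 and the 5-retry, 5 * HZ values visible in the removed UDP default below; with to_exponential left at zero, the major timeout grows linearly by to_increment up to to_maxval.

#include <stdio.h>

#define HZ 1000	/* assumed tick rate, for illustration only */

struct model_timeout {
	unsigned long initval, increment, maxval;
	unsigned int retries;
};

static void model_set_timeout(struct model_timeout *to, unsigned int retr,
			      unsigned long incr)
{
	to->initval = to->increment = incr;
	to->maxval = to->initval + incr * retr;	/* the formula set by this patch */
	to->retries = retr;
}

int main(void)
{
	struct model_timeout to;
	unsigned long t;
	unsigned int i;

	model_set_timeout(&to, 5, 5 * HZ);	/* assumed UDP-style defaults */

	/* linear backoff: initval, then +increment per retry, capped at maxval */
	for (t = to.initval, i = 0; i <= to.retries; i++, t += to.increment)
		printf("attempt %u: timeout %lu ticks\n", i + 1,
		       t > to.maxval ? to.maxval : t);
	return 0;
}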
-unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
-unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
-
-/*
- * Initialize an RPC client
- */
-static struct rpc_xprt *
-xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
+static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
{
+ int result;
struct rpc_xprt *xprt;
- unsigned int entries;
- size_t slot_table_size;
struct rpc_rqst *req;
- dprintk("RPC: setting up %s transport...\n",
- proto == IPPROTO_UDP? "UDP" : "TCP");
-
- entries = (proto == IPPROTO_TCP)?
- xprt_tcp_slot_table_entries : xprt_udp_slot_table_entries;
-
if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
return ERR_PTR(-ENOMEM);
memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
- xprt->max_reqs = entries;
- slot_table_size = entries * sizeof(xprt->slot[0]);
- xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
- if (xprt->slot == NULL) {
- kfree(xprt);
- return ERR_PTR(-ENOMEM);
- }
- memset(xprt->slot, 0, slot_table_size);
xprt->addr = *ap;
- xprt->prot = proto;
- xprt->stream = (proto == IPPROTO_TCP)? 1 : 0;
- if (xprt->stream) {
- xprt->cwnd = RPC_MAXCWND(xprt);
- xprt->nocong = 1;
- xprt->max_payload = (1U << 31) - 1;
- } else {
- xprt->cwnd = RPC_INITCWND;
- xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
+
+ switch (proto) {
+ case IPPROTO_UDP:
+ result = xs_setup_udp(xprt, to);
+ break;
+ case IPPROTO_TCP:
+ result = xs_setup_tcp(xprt, to);
+ break;
+ default:
+ printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
+ proto);
+ result = -EIO;
+ break;
+ }
+ if (result) {
+ kfree(xprt);
+ return ERR_PTR(result);
}
- spin_lock_init(&xprt->sock_lock);
- spin_lock_init(&xprt->xprt_lock);
- init_waitqueue_head(&xprt->cong_wait);
+
+ spin_lock_init(&xprt->transport_lock);
+ spin_lock_init(&xprt->reserve_lock);
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
- INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt);
- INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt);
+ INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
init_timer(&xprt->timer);
xprt->timer.function = xprt_init_autodisconnect;
xprt->timer.data = (unsigned long) xprt;
xprt->last_used = jiffies;
- xprt->port = XPRT_MAX_RESVPORT;
-
- /* Set timeout parameters */
- if (to) {
- xprt->timeout = *to;
- } else
- xprt_default_timeout(&xprt->timeout, xprt->prot);
+ xprt->cwnd = RPC_INITCWND;
rpc_init_wait_queue(&xprt->pending, "xprt_pending");
rpc_init_wait_queue(&xprt->sending, "xprt_sending");
@@ -1529,139 +944,25 @@ xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
/* initialize free list */
- for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--)
+ for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
list_add(&req->rq_list, &xprt->free);
xprt_init_xid(xprt);
- /* Check whether we want to use a reserved port */
- xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
-
dprintk("RPC: created transport %p with %u slots\n", xprt,
xprt->max_reqs);
return xprt;
}
-/*
- * Bind to a reserved port
- */
-static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
-{
- struct sockaddr_in myaddr = {
- .sin_family = AF_INET,
- };
- int err, port;
-
- /* Were we already bound to a given port? Try to reuse it */
- port = xprt->port;
- do {
- myaddr.sin_port = htons(port);
- err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
- sizeof(myaddr));
- if (err == 0) {
- xprt->port = port;
- return 0;
- }
- if (--port == 0)
- port = XPRT_MAX_RESVPORT;
- } while (err == -EADDRINUSE && port != xprt->port);
-
- printk("RPC: Can't bind to reserved port (%d).\n", -err);
- return err;
-}
-
-static void
-xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock)
-{
- struct sock *sk = sock->sk;
-
- if (xprt->inet)
- return;
-
- write_lock_bh(&sk->sk_callback_lock);
- sk->sk_user_data = xprt;
- xprt->old_data_ready = sk->sk_data_ready;
- xprt->old_state_change = sk->sk_state_change;
- xprt->old_write_space = sk->sk_write_space;
- if (xprt->prot == IPPROTO_UDP) {
- sk->sk_data_ready = udp_data_ready;
- sk->sk_no_check = UDP_CSUM_NORCV;
- xprt_set_connected(xprt);
- } else {
- tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */
- sk->sk_data_ready = tcp_data_ready;
- sk->sk_state_change = tcp_state_change;
- xprt_clear_connected(xprt);
- }
- sk->sk_write_space = xprt_write_space;
-
- /* Reset to new socket */
- xprt->sock = sock;
- xprt->inet = sk;
- write_unlock_bh(&sk->sk_callback_lock);
-
- return;
-}
-
-/*
- * Set socket buffer length
- */
-void
-xprt_sock_setbufsize(struct rpc_xprt *xprt)
-{
- struct sock *sk = xprt->inet;
-
- if (xprt->stream)
- return;
- if (xprt->rcvsize) {
- sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
- sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
- }
- if (xprt->sndsize) {
- sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
- sk->sk_write_space(sk);
- }
-}
-
-/*
- * Datastream sockets are created here, but xprt_connect will create
- * and connect stream sockets.
- */
-static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport)
-{
- struct socket *sock;
- int type, err;
-
- dprintk("RPC: xprt_create_socket(%s %d)\n",
- (proto == IPPROTO_UDP)? "udp" : "tcp", proto);
-
- type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
-
- if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) {
- printk("RPC: can't create socket (%d).\n", -err);
- return NULL;
- }
-
- /* If the caller has the capability, bind to a reserved port */
- if (resvport && xprt_bindresvport(xprt, sock) < 0) {
- printk("RPC: can't bind to reserved port.\n");
- goto failed;
- }
-
- return sock;
-
-failed:
- sock_release(sock);
- return NULL;
-}
-
-/*
- * Create an RPC client transport given the protocol and peer address.
+/**
+ * xprt_create_proto - create an RPC client transport
+ * @proto: requested transport protocol
+ * @sap: remote peer's address
+ * @to: timeout parameters for new transport
+ *
*/
-struct rpc_xprt *
-xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
+struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
{
struct rpc_xprt *xprt;
@@ -1673,46 +974,26 @@ xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
return xprt;
}
-/*
- * Prepare for transport shutdown.
- */
-static void
-xprt_shutdown(struct rpc_xprt *xprt)
+static void xprt_shutdown(struct rpc_xprt *xprt)
{
xprt->shutdown = 1;
rpc_wake_up(&xprt->sending);
rpc_wake_up(&xprt->resend);
- rpc_wake_up(&xprt->pending);
+ xprt_wake_pending_tasks(xprt, -EIO);
rpc_wake_up(&xprt->backlog);
- wake_up(&xprt->cong_wait);
del_timer_sync(&xprt->timer);
-
- /* synchronously wait for connect worker to finish */
- cancel_delayed_work(&xprt->sock_connect);
- flush_scheduled_work();
}
-/*
- * Clear the xprt backlog queue
- */
-static int
-xprt_clear_backlog(struct rpc_xprt *xprt) {
- rpc_wake_up_next(&xprt->backlog);
- wake_up(&xprt->cong_wait);
- return 1;
-}
-
-/*
- * Destroy an RPC transport, killing off all requests.
+/**
+ * xprt_destroy - destroy an RPC transport, killing off all requests.
+ * @xprt: transport to destroy
+ *
*/
-int
-xprt_destroy(struct rpc_xprt *xprt)
+int xprt_destroy(struct rpc_xprt *xprt)
{
dprintk("RPC: destroying transport %p\n", xprt);
xprt_shutdown(xprt);
- xprt_disconnect(xprt);
- xprt_close(xprt);
- kfree(xprt->slot);
+ xprt->ops->destroy(xprt);
kfree(xprt);
return 0;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
new file mode 100644
index 00000000000..2e1529217e6
--- /dev/null
+++ b/net/sunrpc/xprtsock.c
@@ -0,0 +1,1252 @@
+/*
+ * linux/net/sunrpc/xprtsock.c
+ *
+ * Client-side transport implementation for sockets.
+ *
+ * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
+ * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
+ * TCP NFS related read + write fixes
+ * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
+ *
+ * Rewrite of large parts of the code in order to stabilize TCP stuff.
+ * Fix behaviour when socket buffer is full.
+ * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
+ *
+ * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/mm.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/file.h>
+
+#include <net/sock.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+
+/*
+ * How many times to try sending a request on a socket before waiting
+ * for the socket buffer to clear.
+ */
+#define XS_SENDMSG_RETRY (10U)
+
+/*
+ * Timeout for an RPC UDP socket connect. UDP socket connects are
+ * synchronous, but we set a timeout anyway in case of resource
+ * exhaustion on the local host.
+ */
+#define XS_UDP_CONN_TO (5U * HZ)
+
+/*
+ * Wait duration for an RPC TCP connection to be established. Solaris
+ * NFS over TCP uses 60 seconds, for example, which is in line with how
+ * long a server takes to reboot.
+ */
+#define XS_TCP_CONN_TO (60U * HZ)
+
+/*
+ * Wait duration for a reply from the RPC portmapper.
+ */
+#define XS_BIND_TO (60U * HZ)
+
+/*
+ * Delay if a UDP socket connect error occurs. This is most likely some
+ * kind of resource problem on the local host.
+ */
+#define XS_UDP_REEST_TO (2U * HZ)
+
+/*
+ * The reestablish timeout allows clients to delay for a bit before attempting
+ * to reconnect to a server that just dropped our connection.
+ *
+ * We implement an exponential backoff when trying to reestablish a TCP
+ * transport connection with the server. Some servers like to drop a TCP
+ * connection when they are overworked, so we start with a short timeout and
+ * increase over time if the server is down or not responding.
+ */
+#define XS_TCP_INIT_REEST_TO (3U * HZ)
+#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
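
A toy model of the reconnect backoff described in the comment above, under the assumption that the transport doubles its reestablish delay after each failed attempt and clamps it at XS_TCP_MAX_REEST_TO (the exact update lives later in this file).

#include <stdio.h>

#define HZ 1000				/* assumed tick rate */
#define INIT_REEST_TO (3U * HZ)		/* mirrors XS_TCP_INIT_REEST_TO */
#define MAX_REEST_TO  (5U * 60 * HZ)	/* mirrors XS_TCP_MAX_REEST_TO */

int main(void)
{
	unsigned int to = INIT_REEST_TO;
	int attempt;

	for (attempt = 1; attempt <= 10; attempt++) {
		printf("reconnect attempt %d: wait %u ticks\n", attempt, to);
		to <<= 1;			/* back off exponentially */
		if (to > MAX_REEST_TO)
			to = MAX_REEST_TO;	/* ...up to the ceiling */
	}
	return 0;
}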
+
+/*
+ * TCP idle timeout; client drops the transport socket if it is idle
+ * for this long. Note that we also timeout UDP sockets to prevent
+ * holding port numbers when there is no RPC traffic.
+ */
+#define XS_IDLE_DISC_TO (5U * 60 * HZ)
+
+#ifdef RPC_DEBUG
+# undef RPC_DEBUG_DATA
+# define RPCDBG_FACILITY RPCDBG_TRANS
+#endif
+
+#ifdef RPC_DEBUG_DATA
+static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
+{
+ u8 *buf = (u8 *) packet;
+ int j;
+
+ dprintk("RPC: %s\n", msg);
+ for (j = 0; j < count && j < 128; j += 4) {
+ if (!(j & 31)) {
+ if (j)
+ dprintk("\n");
+ dprintk("0x%04x ", j);
+ }
+ dprintk("%02x%02x%02x%02x ",
+ buf[j], buf[j+1], buf[j+2], buf[j+3]);
+ }
+ dprintk("\n");
+}
+#else
+static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
+{
+ /* NOP */
+}
+#endif
+
+#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
+
+static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+{
+ struct kvec iov = {
+ .iov_base = xdr->head[0].iov_base + base,
+ .iov_len = len - base,
+ };
+ struct msghdr msg = {
+ .msg_name = addr,
+ .msg_namelen = addrlen,
+ .msg_flags = XS_SENDMSG_FLAGS,
+ };
+
+ if (xdr->len > len)
+ msg.msg_flags |= MSG_MORE;
+
+ if (likely(iov.iov_len))
+ return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+ return kernel_sendmsg(sock, &msg, NULL, 0, 0);
+}
+
+static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+{
+ struct kvec iov = {
+ .iov_base = xdr->tail[0].iov_base + base,
+ .iov_len = len - base,
+ };
+ struct msghdr msg = {
+ .msg_flags = XS_SENDMSG_FLAGS,
+ };
+
+ return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+}
+
+/**
+ * xs_sendpages - write pages directly to a socket
+ * @sock: socket to send on
+ * @addr: UDP only -- address of destination
+ * @addrlen: UDP only -- length of destination address
+ * @xdr: buffer containing this request
+ * @base: starting position in the buffer
+ *
+ */
+static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
+{
+ struct page **ppage = xdr->pages;
+ unsigned int len, pglen = xdr->page_len;
+ int err, ret = 0;
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+
+ if (unlikely(!sock))
+ return -ENOTCONN;
+
+ clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+
+ len = xdr->head[0].iov_len;
+ if (base < len || (addr != NULL && base == 0)) {
+ err = xs_send_head(sock, addr, addrlen, xdr, base, len);
+ if (ret == 0)
+ ret = err;
+ else if (err > 0)
+ ret += err;
+ if (err != (len - base))
+ goto out;
+ base = 0;
+ } else
+ base -= len;
+
+ if (unlikely(pglen == 0))
+ goto copy_tail;
+ if (unlikely(base >= pglen)) {
+ base -= pglen;
+ goto copy_tail;
+ }
+ if (base || xdr->page_base) {
+ pglen -= base;
+ base += xdr->page_base;
+ ppage += base >> PAGE_CACHE_SHIFT;
+ base &= ~PAGE_CACHE_MASK;
+ }
+
+ sendpage = sock->ops->sendpage ? : sock_no_sendpage;
+ do {
+ int flags = XS_SENDMSG_FLAGS;
+
+ len = PAGE_CACHE_SIZE;
+ if (base)
+ len -= base;
+ if (pglen < len)
+ len = pglen;
+
+ if (pglen != len || xdr->tail[0].iov_len != 0)
+ flags |= MSG_MORE;
+
+ /* Hmm... We might be dealing with highmem pages */
+ if (PageHighMem(*ppage))
+ sendpage = sock_no_sendpage;
+ err = sendpage(sock, *ppage, base, len, flags);
+ if (ret == 0)
+ ret = err;
+ else if (err > 0)
+ ret += err;
+ if (err != len)
+ goto out;
+ base = 0;
+ ppage++;
+ } while ((pglen -= len) != 0);
+copy_tail:
+ len = xdr->tail[0].iov_len;
+ if (base < len) {
+ err = xs_send_tail(sock, xdr, base, len);
+ if (ret == 0)
+ ret = err;
+ else if (err > 0)
+ ret += err;
+ }
+out:
+ return ret;
+}
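
A stand-alone sketch of how xs_sendpages() resumes a partially sent request: the count of bytes already sent ("base") selects the head kvec, the page array, or the tail kvec, in that order. The struct below is a simplified stand-in for struct xdr_buf.

#include <stdio.h>

struct model_xdr_buf {
	unsigned int head_len;
	unsigned int page_len;
	unsigned int tail_len;
};

/* Report which region a resume offset falls in, mirroring the
 * head -> pages -> tail fall-through above. */
static const char *model_region(const struct model_xdr_buf *xdr,
				unsigned int base, unsigned int *off)
{
	if (base < xdr->head_len) {
		*off = base;
		return "head";
	}
	base -= xdr->head_len;
	if (base < xdr->page_len) {
		*off = base;
		return "pages";
	}
	base -= xdr->page_len;
	*off = base;
	return "tail";
}

int main(void)
{
	struct model_xdr_buf xdr = { .head_len = 128, .page_len = 4096, .tail_len = 16 };
	unsigned int off, probes[] = { 0, 100, 128, 2000, 4224, 4230 };
	unsigned int i;

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
		const char *r = model_region(&xdr, probes[i], &off);
		printf("base %u -> %s + %u\n", probes[i], r, off);
	}
	return 0;
}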
+
+/**
+ * xs_nospace - place task on wait queue if transmit was incomplete
+ * @task: task to put to sleep
+ *
+ */
+static void xs_nospace(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
+ task->tk_pid, req->rq_slen - req->rq_bytes_sent,
+ req->rq_slen);
+
+ if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
+ /* Protect against races with write_space */
+ spin_lock_bh(&xprt->transport_lock);
+
+ /* Don't race with disconnect */
+ if (!xprt_connected(xprt))
+ task->tk_status = -ENOTCONN;
+ else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
+ xprt_wait_for_buffer_space(task);
+
+ spin_unlock_bh(&xprt->transport_lock);
+ } else
+ /* Keep holding the socket if it is blocked */
+ rpc_delay(task, HZ>>4);
+}
+
+/**
+ * xs_udp_send_request - write an RPC request to a UDP socket
+ * @task: address of RPC task that manages the state of an RPC request
+ *
+ * Return values:
+ * 0: The request has been sent
+ * EAGAIN: The socket was blocked, please call again later to
+ * complete the request
+ * ENOTCONN: Caller needs to invoke connect logic then call again
+ * other: Some other error occurred, the request was not sent
+ */
+static int xs_udp_send_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct xdr_buf *xdr = &req->rq_snd_buf;
+ int status;
+
+ xs_pktdump("packet data:",
+ req->rq_svec->iov_base,
+ req->rq_svec->iov_len);
+
+ req->rq_xtime = jiffies;
+ status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
+ sizeof(xprt->addr), xdr, req->rq_bytes_sent);
+
+ dprintk("RPC: xs_udp_send_request(%u) = %d\n",
+ xdr->len - req->rq_bytes_sent, status);
+
+ if (likely(status >= (int) req->rq_slen))
+ return 0;
+
+ /* Still some bytes left; set up for a retry later. */
+ if (status > 0)
+ status = -EAGAIN;
+
+ switch (status) {
+ case -ENETUNREACH:
+ case -EPIPE:
+ case -ECONNREFUSED:
+ /* When the server has died, an ICMP port unreachable message
+ * prompts ECONNREFUSED. */
+ break;
+ case -EAGAIN:
+ xs_nospace(task);
+ break;
+ default:
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
+ break;
+ }
+
+ return status;
+}
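
A hedged sketch of how a caller might act on the return codes documented above; the function below is an illustration of the contract, not the kernel's xprt_transmit().

#include <errno.h>
#include <stdio.h>

static const char *dispatch_send_status(int status)
{
	switch (status) {
	case 0:
		return "request sent; wait for the reply";
	case -EAGAIN:
		return "socket buffer full; retry when write space appears";
	case -ENOTCONN:
		return "not connected; run connect logic, then retry";
	default:
		return "hard error; fail or retransmit the request";
	}
}

int main(void)
{
	int codes[] = { 0, -EAGAIN, -ENOTCONN, -EPIPE };
	unsigned int i;

	for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("%d: %s\n", codes[i], dispatch_send_status(codes[i]));
	return 0;
}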
+
+static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
+{
+ u32 reclen = buf->len - sizeof(rpc_fraghdr);
+ rpc_fraghdr *base = buf->head[0].iov_base;
+ *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
+}
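
A small sketch of the RPC-over-TCP record marker this helper writes: the top bit flags the last fragment of a record and the low 31 bits carry the fragment length, which excludes the 4-byte marker itself. The macro values mirror RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK as assumptions; the kernel stores the marker with htonl() in the first bytes of the send buffer.

#include <stdio.h>
#include <stdint.h>

#define LAST_FRAGMENT 0x80000000UL	/* mirrors RPC_LAST_STREAM_FRAGMENT */
#define SIZE_MASK     0x7fffffffUL	/* mirrors RPC_FRAGMENT_SIZE_MASK */

/* Build a host-order marker for a fragment of 'payload_len' bytes. */
static uint32_t encode_marker(uint32_t payload_len, int last)
{
	return (last ? LAST_FRAGMENT : 0) | (payload_len & SIZE_MASK);
}

int main(void)
{
	uint32_t marker = encode_marker(472, 1);

	printf("marker 0x%08x: length %u, last fragment: %s\n",
	       (unsigned)marker, (unsigned)(marker & SIZE_MASK),
	       (marker & LAST_FRAGMENT) ? "yes" : "no");
	return 0;
}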
+
+/**
+ * xs_tcp_send_request - write an RPC request to a TCP socket
+ * @task: address of RPC task that manages the state of an RPC request
+ *
+ * Return values:
+ * 0: The request has been sent
+ * EAGAIN: The socket was blocked, please call again later to
+ * complete the request
+ * ENOTCONN: Caller needs to invoke connect logic then call again
+ * other: Some other error occurred, the request was not sent
+ *
+ * XXX: In the case of soft timeouts, should we eventually give up
+ * if sendmsg is not able to make progress?
+ */
+static int xs_tcp_send_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct xdr_buf *xdr = &req->rq_snd_buf;
+ int status, retry = 0;
+
+ xs_encode_tcp_record_marker(&req->rq_snd_buf);
+
+ xs_pktdump("packet data:",
+ req->rq_svec->iov_base,
+ req->rq_svec->iov_len);
+
+ /* Continue transmitting the packet/record. We must be careful
+ * to cope with writespace callbacks arriving _after_ we have
+ * called sendmsg(). */
+ while (1) {
+ req->rq_xtime = jiffies;
+ status = xs_sendpages(xprt->sock, NULL, 0, xdr,
+ req->rq_bytes_sent);
+
+ dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
+ xdr->len - req->rq_bytes_sent, status);
+
+ if (unlikely(status < 0))
+ break;
+
+ /* If we've sent the entire packet, immediately
+ * reset the count of bytes sent. */
+ req->rq_bytes_sent += status;
+ if (likely(req->rq_bytes_sent >= req->rq_slen)) {
+ req->rq_bytes_sent = 0;
+ return 0;
+ }
+
+ status = -EAGAIN;
+ if (retry++ > XS_SENDMSG_RETRY)
+ break;
+ }
+
+ switch (status) {
+ case -EAGAIN:
+ xs_nospace(task);
+ break;
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+ case -ENOTCONN:
+ case -EPIPE:
+ status = -ENOTCONN;
+ break;
+ default:
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
+ xprt_disconnect(xprt);
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * xs_close - close a socket
+ * @xprt: transport
+ *
+ * This is used when all requests are complete; i.e., no DRC state that we
+ * want to preserve remains on the server.
+ */
+static void xs_close(struct rpc_xprt *xprt)
+{
+ struct socket *sock = xprt->sock;
+ struct sock *sk = xprt->inet;
+
+ if (!sk)
+ return;
+
+ dprintk("RPC: xs_close xprt %p\n", xprt);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ xprt->inet = NULL;
+ xprt->sock = NULL;
+
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = xprt->old_data_ready;
+ sk->sk_state_change = xprt->old_state_change;
+ sk->sk_write_space = xprt->old_write_space;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ sk->sk_no_check = 0;
+
+ sock_release(sock);
+}
+
+/**
+ * xs_destroy - prepare to shutdown a transport
+ * @xprt: doomed transport
+ *
+ */
+static void xs_destroy(struct rpc_xprt *xprt)
+{
+ dprintk("RPC: xs_destroy xprt %p\n", xprt);
+
+ cancel_delayed_work(&xprt->connect_worker);
+ flush_scheduled_work();
+
+ xprt_disconnect(xprt);
+ xs_close(xprt);
+ kfree(xprt->slot);
+}
+
+static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
+{
+ return (struct rpc_xprt *) sk->sk_user_data;
+}
+
+/**
+ * xs_udp_data_ready - "data ready" callback for UDP sockets
+ * @sk: socket with data to read
+ * @len: how much data to read
+ *
+ */
+static void xs_udp_data_ready(struct sock *sk, int len)
+{
+ struct rpc_task *task;
+ struct rpc_xprt *xprt;
+ struct rpc_rqst *rovr;
+ struct sk_buff *skb;
+ int err, repsize, copied;
+ u32 _xid, *xp;
+
+ read_lock(&sk->sk_callback_lock);
+ dprintk("RPC: xs_udp_data_ready...\n");
+ if (!(xprt = xprt_from_sock(sk)))
+ goto out;
+
+ if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
+ goto out;
+
+ if (xprt->shutdown)
+ goto dropit;
+
+ repsize = skb->len - sizeof(struct udphdr);
+ if (repsize < 4) {
+ dprintk("RPC: impossible RPC reply size %d!\n", repsize);
+ goto dropit;
+ }
+
+ /* Copy the XID from the skb... */
+ xp = skb_header_pointer(skb, sizeof(struct udphdr),
+ sizeof(_xid), &_xid);
+ if (xp == NULL)
+ goto dropit;
+
+ /* Look up and lock the request corresponding to the given XID */
+ spin_lock(&xprt->transport_lock);
+ rovr = xprt_lookup_rqst(xprt, *xp);
+ if (!rovr)
+ goto out_unlock;
+ task = rovr->rq_task;
+
+ if ((copied = rovr->rq_private_buf.buflen) > repsize)
+ copied = repsize;
+
+ /* Suck it into the iovec, verify checksum if not done by hw. */
+ if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
+ goto out_unlock;
+
+ /* Something worked... */
+ dst_confirm(skb->dst);
+
+ xprt_adjust_cwnd(task, copied);
+ xprt_update_rtt(task);
+ xprt_complete_rqst(task, copied);
+
+ out_unlock:
+ spin_unlock(&xprt->transport_lock);
+ dropit:
+ skb_free_datagram(sk, skb);
+ out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
+{
+ if (len > desc->count)
+ len = desc->count;
+ if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
+ dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
+ len, desc->count);
+ return 0;
+ }
+ desc->offset += len;
+ desc->count -= len;
+ dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
+ len, desc->count);
+ return len;
+}
+
+static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+ size_t len, used;
+ char *p;
+
+ p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
+ len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
+ used = xs_tcp_copy_data(desc, p, len);
+ xprt->tcp_offset += used;
+ if (used != len)
+ return;
+
+ xprt->tcp_reclen = ntohl(xprt->tcp_recm);
+ if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
+ xprt->tcp_flags |= XPRT_LAST_FRAG;
+ else
+ xprt->tcp_flags &= ~XPRT_LAST_FRAG;
+ xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
+
+ xprt->tcp_flags &= ~XPRT_COPY_RECM;
+ xprt->tcp_offset = 0;
+
+ /* Sanity check of the record length */
+ if (unlikely(xprt->tcp_reclen < 4)) {
+ dprintk("RPC: invalid TCP record fragment length\n");
+ xprt_disconnect(xprt);
+ return;
+ }
+ dprintk("RPC: reading TCP record fragment of length %d\n",
+ xprt->tcp_reclen);
+}
+
+static void xs_tcp_check_recm(struct rpc_xprt *xprt)
+{
+ dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
+ xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
+ if (xprt->tcp_offset == xprt->tcp_reclen) {
+ xprt->tcp_flags |= XPRT_COPY_RECM;
+ xprt->tcp_offset = 0;
+ if (xprt->tcp_flags & XPRT_LAST_FRAG) {
+ xprt->tcp_flags &= ~XPRT_COPY_DATA;
+ xprt->tcp_flags |= XPRT_COPY_XID;
+ xprt->tcp_copied = 0;
+ }
+ }
+}
+
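+/* Read the XID identifying which outstanding request this reply answers */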
+static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+ size_t len, used;
+ char *p;
+
+ len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
+ dprintk("RPC: reading XID (%Zu bytes)\n", len);
+ p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
+ used = xs_tcp_copy_data(desc, p, len);
+ xprt->tcp_offset += used;
+ if (used != len)
+ return;
+ xprt->tcp_flags &= ~XPRT_COPY_XID;
+ xprt->tcp_flags |= XPRT_COPY_DATA;
+ xprt->tcp_copied = 4;
+ dprintk("RPC: reading reply for XID %08x\n",
+ ntohl(xprt->tcp_xid));
+ xs_tcp_check_recm(xprt);
+}
+
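+/*
+ * Copy reply data from the current fragment into the receive buffer of
+ * the request matching the XID, and complete the request once all of
+ * its data has arrived.
+ */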
+static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+ struct rpc_rqst *req;
+ struct xdr_buf *rcvbuf;
+ size_t len;
+ ssize_t r;
+
+ /* Find and lock the request corresponding to this xid */
+ spin_lock(&xprt->transport_lock);
+ req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
+ if (!req) {
+ xprt->tcp_flags &= ~XPRT_COPY_DATA;
+ dprintk("RPC: XID %08x request not found!\n",
+ ntohl(xprt->tcp_xid));
+ spin_unlock(&xprt->transport_lock);
+ return;
+ }
+
+ rcvbuf = &req->rq_private_buf;
+ len = desc->count;
+ if (len > xprt->tcp_reclen - xprt->tcp_offset) {
+ skb_reader_t my_desc;
+
+ len = xprt->tcp_reclen - xprt->tcp_offset;
+ memcpy(&my_desc, desc, sizeof(my_desc));
+ my_desc.count = len;
+ r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+ &my_desc, xs_tcp_copy_data);
+ desc->count -= r;
+ desc->offset += r;
+ } else
+ r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+ desc, xs_tcp_copy_data);
+
+ if (r > 0) {
+ xprt->tcp_copied += r;
+ xprt->tcp_offset += r;
+ }
+ if (r != len) {
+ /* Error when copying to the receive buffer,
+ * usually because we weren't able to allocate
+ * additional buffer pages. All we can do now
+ * is turn off XPRT_COPY_DATA, so the request
+ * will not receive any additional updates,
+ * and time out.
+ * Any remaining data from this record will
+ * be discarded.
+ */
+ xprt->tcp_flags &= ~XPRT_COPY_DATA;
+ dprintk("RPC: XID %08x truncated request\n",
+ ntohl(xprt->tcp_xid));
+ dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+ xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+ goto out;
+ }
+
+ dprintk("RPC: XID %08x read %Zd bytes\n",
+ ntohl(xprt->tcp_xid), r);
+ dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+ xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+
+ if (xprt->tcp_copied == req->rq_private_buf.buflen)
+ xprt->tcp_flags &= ~XPRT_COPY_DATA;
+ else if (xprt->tcp_offset == xprt->tcp_reclen) {
+ if (xprt->tcp_flags & XPRT_LAST_FRAG)
+ xprt->tcp_flags &= ~XPRT_COPY_DATA;
+ }
+
+out:
+ if (!(xprt->tcp_flags & XPRT_COPY_DATA))
+ xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
+ spin_unlock(&xprt->transport_lock);
+ xs_tcp_check_recm(xprt);
+}
+
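+/* Skip over fragment data that is not going to be copied anywhere */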
+static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
+{
+ size_t len;
+
+ len = xprt->tcp_reclen - xprt->tcp_offset;
+ if (len > desc->count)
+ len = desc->count;
+ desc->count -= len;
+ desc->offset += len;
+ xprt->tcp_offset += len;
+ dprintk("RPC: discarded %Zu bytes\n", len);
+ xs_tcp_check_recm(xprt);
+}
+
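+/*
+ * tcp_read_sock() callback: run the record-parsing state machine over
+ * the data available in this skb.
+ */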
+static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
+{
+ struct rpc_xprt *xprt = rd_desc->arg.data;
+ skb_reader_t desc = {
+ .skb = skb,
+ .offset = offset,
+ .count = len,
+ .csum = 0
+ };
+
+ dprintk("RPC: xs_tcp_data_recv started\n");
+ do {
+ /* Read in a new fragment marker if necessary */
+ /* Can we ever really expect to get completely empty fragments? */
+ if (xprt->tcp_flags & XPRT_COPY_RECM) {
+ xs_tcp_read_fraghdr(xprt, &desc);
+ continue;
+ }
+ /* Read in the xid if necessary */
+ if (xprt->tcp_flags & XPRT_COPY_XID) {
+ xs_tcp_read_xid(xprt, &desc);
+ continue;
+ }
+ /* Read in the request data */
+ if (xprt->tcp_flags & XPRT_COPY_DATA) {
+ xs_tcp_read_request(xprt, &desc);
+ continue;
+ }
+ /* Skip over any trailing bytes on short reads */
+ xs_tcp_read_discard(xprt, &desc);
+ } while (desc.count);
+ dprintk("RPC: xs_tcp_data_recv done\n");
+ return len - desc.count;
+}
+
+/**
+ * xs_tcp_data_ready - "data ready" callback for TCP sockets
+ * @sk: socket with data to read
+ * @bytes: how much data to read
+ *
+ */
+static void xs_tcp_data_ready(struct sock *sk, int bytes)
+{
+ struct rpc_xprt *xprt;
+ read_descriptor_t rd_desc;
+
+ read_lock(&sk->sk_callback_lock);
+ dprintk("RPC: xs_tcp_data_ready...\n");
+ if (!(xprt = xprt_from_sock(sk)))
+ goto out;
+ if (xprt->shutdown)
+ goto out;
+
+ /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
+ rd_desc.arg.data = xprt;
+ rd_desc.count = 65536;
+ tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
+out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_tcp_state_change - callback to handle TCP socket state changes
+ * @sk: socket whose state has changed
+ *
+ */
+static void xs_tcp_state_change(struct sock *sk)
+{
+ struct rpc_xprt *xprt;
+
+ read_lock(&sk->sk_callback_lock);
+ if (!(xprt = xprt_from_sock(sk)))
+ goto out;
+ dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
+ dprintk("RPC: state %x conn %d dead %d zapped %d\n",
+ sk->sk_state, xprt_connected(xprt),
+ sock_flag(sk, SOCK_DEAD),
+ sock_flag(sk, SOCK_ZAPPED));
+
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+ spin_lock_bh(&xprt->transport_lock);
+ if (!xprt_test_and_set_connected(xprt)) {
+ /* Reset TCP record info */
+ xprt->tcp_offset = 0;
+ xprt->tcp_reclen = 0;
+ xprt->tcp_copied = 0;
+ xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ xprt_wake_pending_tasks(xprt, 0);
+ }
+ spin_unlock_bh(&xprt->transport_lock);
+ break;
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ break;
+ default:
+ xprt_disconnect(xprt);
+ break;
+ }
+ out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_udp_write_space - callback invoked when socket buffer space
+ * becomes available
+ * @sk: socket whose state has changed
+ *
+ * Called when more output buffer space is available for this socket.
+ * We try not to wake our writers until they can make "significant"
+ * progress, otherwise we'll waste resources thrashing kernel_sendmsg
+ * with a bunch of small requests.
+ */
+static void xs_udp_write_space(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+
+ /* from net/core/sock.c:sock_def_write_space */
+ if (sock_writeable(sk)) {
+ struct socket *sock;
+ struct rpc_xprt *xprt;
+
+ if (unlikely(!(sock = sk->sk_socket)))
+ goto out;
+ if (unlikely(!(xprt = xprt_from_sock(sk))))
+ goto out;
+ if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
+ goto out;
+
+ xprt_write_space(xprt);
+ }
+
+ out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * xs_tcp_write_space - callback invoked when socket buffer space
+ * becomes available
+ * @sk: socket whose state has changed
+ *
+ * Called when more output buffer space is available for this socket.
+ * We try not to wake our writers until they can make "significant"
+ * progress, otherwise we'll waste resources thrashing kernel_sendmsg
+ * with a bunch of small requests.
+ */
+static void xs_tcp_write_space(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+
+ /* from net/core/stream.c:sk_stream_write_space */
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ struct socket *sock;
+ struct rpc_xprt *xprt;
+
+ if (unlikely(!(sock = sk->sk_socket)))
+ goto out;
+ if (unlikely(!(xprt = xprt_from_sock(sk))))
+ goto out;
+ if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
+ goto out;
+
+ xprt_write_space(xprt);
+ }
+
+ out:
+ read_unlock(&sk->sk_callback_lock);
+}
+
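+/* Apply the transport's send and receive buffer settings to the socket */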
+static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
+{
+ struct sock *sk = xprt->inet;
+
+ if (xprt->rcvsize) {
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
+ }
+ if (xprt->sndsize) {
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
+ sk->sk_write_space(sk);
+ }
+}
+
+/**
+ * xs_udp_set_buffer_size - set send and receive limits
+ * @xprt: generic transport
+ * @sndsize: requested size of send buffer, in bytes
+ * @rcvsize: requested size of receive buffer, in bytes
+ *
+ * Set socket send and receive buffer size limits.
+ */
+static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
+{
+ xprt->sndsize = 0;
+ if (sndsize)
+ xprt->sndsize = sndsize + 1024;
+ xprt->rcvsize = 0;
+ if (rcvsize)
+ xprt->rcvsize = rcvsize + 1024;
+
+ xs_udp_do_set_buffer_size(xprt);
+}
+
+/**
+ * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
+ * @task: task that timed out
+ *
+ * Adjust the congestion window after a retransmit timeout has occurred.
+ */
+static void xs_udp_timer(struct rpc_task *task)
+{
+ xprt_adjust_cwnd(task, -ETIMEDOUT);
+}
+
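+/*
+ * Bind to a reserved port, scanning downward from the transport's
+ * previous port and wrapping within the reserved port range.
+ */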
+static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
+{
+ struct sockaddr_in myaddr = {
+ .sin_family = AF_INET,
+ };
+ int err;
+ unsigned short port = xprt->port;
+
+ do {
+ myaddr.sin_port = htons(port);
+ err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
+ sizeof(myaddr));
+ if (err == 0) {
+ xprt->port = port;
+ dprintk("RPC: xs_bindresvport bound to port %u\n",
+ port);
+ return 0;
+ }
+ if (port <= xprt_min_resvport)
+ port = xprt_max_resvport;
+ else
+ port--;
+ } while (err == -EADDRINUSE && port != xprt->port);
+
+ dprintk("RPC: can't bind to reserved port (%d).\n", -err);
+ return err;
+}
+
+/**
+ * xs_udp_connect_worker - set up a UDP socket
+ * @args: RPC transport to connect
+ *
+ * Invoked by a work queue tasklet.
+ */
+static void xs_udp_connect_worker(void *args)
+{
+ struct rpc_xprt *xprt = (struct rpc_xprt *) args;
+ struct socket *sock = xprt->sock;
+ int err, status = -EIO;
+
+ if (xprt->shutdown || xprt->addr.sin_port == 0)
+ goto out;
+
+ dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt);
+
+ /* Start by resetting any existing state */
+ xs_close(xprt);
+
+ if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
+ dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
+ goto out;
+ }
+
+ if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+ sock_release(sock);
+ goto out;
+ }
+
+ if (!xprt->inet) {
+ struct sock *sk = sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+
+ sk->sk_user_data = xprt;
+ xprt->old_data_ready = sk->sk_data_ready;
+ xprt->old_state_change = sk->sk_state_change;
+ xprt->old_write_space = sk->sk_write_space;
+ sk->sk_data_ready = xs_udp_data_ready;
+ sk->sk_write_space = xs_udp_write_space;
+ sk->sk_no_check = UDP_CSUM_NORCV;
+
+ xprt_set_connected(xprt);
+
+ /* Reset to new socket */
+ xprt->sock = sock;
+ xprt->inet = sk;
+
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ xs_udp_do_set_buffer_size(xprt);
+ status = 0;
+out:
+ xprt_wake_pending_tasks(xprt, status);
+ xprt_clear_connecting(xprt);
+}
+
+/*
+ * We need to preserve the port number so the reply cache on the server can
+ * find our cached RPC replies when we get around to reconnecting.
+ */
+static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
+{
+ int result;
+ struct socket *sock = xprt->sock;
+ struct sockaddr any;
+
+ dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);
+
+ /*
+ * Disconnect the transport socket by doing a connect operation
+ * with AF_UNSPEC. This should return immediately...
+ */
+ memset(&any, 0, sizeof(any));
+ any.sa_family = AF_UNSPEC;
+ result = sock->ops->connect(sock, &any, sizeof(any), 0);
+ if (result)
+ dprintk("RPC: AF_UNSPEC connect return code %d\n",
+ result);
+}
+
+/**
+ * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
+ * @args: RPC transport to connect
+ *
+ * Invoked by a work queue tasklet.
+ */
+static void xs_tcp_connect_worker(void *args)
+{
+ struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+ struct socket *sock = xprt->sock;
+ int err, status = -EIO;
+
+ if (xprt->shutdown || xprt->addr.sin_port == 0)
+ goto out;
+
+ dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt);
+
+ if (!xprt->sock) {
+ /* start from scratch */
+ if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
+ dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
+ goto out;
+ }
+
+ if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+ sock_release(sock);
+ goto out;
+ }
+ } else
+ /* "close" the socket, preserving the local port */
+ xs_tcp_reuse_connection(xprt);
+
+ if (!xprt->inet) {
+ struct sock *sk = sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+
+ sk->sk_user_data = xprt;
+ xprt->old_data_ready = sk->sk_data_ready;
+ xprt->old_state_change = sk->sk_state_change;
+ xprt->old_write_space = sk->sk_write_space;
+ sk->sk_data_ready = xs_tcp_data_ready;
+ sk->sk_state_change = xs_tcp_state_change;
+ sk->sk_write_space = xs_tcp_write_space;
+
+ /* socket options */
+ sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
+ sock_reset_flag(sk, SOCK_LINGER);
+ tcp_sk(sk)->linger2 = 0;
+ tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
+
+ xprt_clear_connected(xprt);
+
+ /* Reset to new socket */
+ xprt->sock = sock;
+ xprt->inet = sk;
+
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+
+ /* Tell the socket layer to start connecting... */
+ status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
+ sizeof(xprt->addr), O_NONBLOCK);
+ dprintk("RPC: %p connect status %d connected %d sock state %d\n",
+ xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
+ if (status < 0) {
+ switch (status) {
+ case -EINPROGRESS:
+ case -EALREADY:
+ goto out_clear;
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+ /* retry with existing socket, after a delay */
+ break;
+ default:
+ /* get rid of existing socket, and retry */
+ xs_close(xprt);
+ break;
+ }
+ }
+out:
+ xprt_wake_pending_tasks(xprt, status);
+out_clear:
+ xprt_clear_connecting(xprt);
+}
+
+/**
+ * xs_connect - connect a socket to a remote endpoint
+ * @task: address of RPC task that manages state of connect request
+ *
+ * TCP: If the remote end dropped the connection, delay reconnecting.
+ *
+ * UDP socket connects are synchronous, but we use a work queue anyway
+ * to guarantee that even unprivileged user processes can set up a
+ * socket on a privileged port.
+ *
+ * If a UDP socket connect fails, the delay behavior here prevents
+ * retry floods (hard mounts).
+ */
+static void xs_connect(struct rpc_task *task)
+{
+ struct rpc_xprt *xprt = task->tk_xprt;
+
+ if (xprt_test_and_set_connecting(xprt))
+ return;
+
+ if (xprt->sock != NULL) {
+ dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n",
+ xprt, xprt->reestablish_timeout / HZ);
+ schedule_delayed_work(&xprt->connect_worker,
+ xprt->reestablish_timeout);
+ xprt->reestablish_timeout <<= 1;
+ if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
+ xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
+ } else {
+ dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
+ schedule_work(&xprt->connect_worker);
+
+ /* flush_scheduled_work can sleep... */
+ if (!RPC_IS_ASYNC(task))
+ flush_scheduled_work();
+ }
+}
+
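+/* RPC transport methods for UDP sockets (congestion-controlled) */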
+static struct rpc_xprt_ops xs_udp_ops = {
+ .set_buffer_size = xs_udp_set_buffer_size,
+ .reserve_xprt = xprt_reserve_xprt_cong,
+ .release_xprt = xprt_release_xprt_cong,
+ .connect = xs_connect,
+ .send_request = xs_udp_send_request,
+ .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
+ .timer = xs_udp_timer,
+ .release_request = xprt_release_rqst_cong,
+ .close = xs_close,
+ .destroy = xs_destroy,
+};
+
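+/* RPC transport methods for TCP sockets */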
+static struct rpc_xprt_ops xs_tcp_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xprt_release_xprt,
+ .connect = xs_connect,
+ .send_request = xs_tcp_send_request,
+ .set_retrans_timeout = xprt_set_retrans_timeout_def,
+ .close = xs_close,
+ .destroy = xs_destroy,
+};
+
+/**
+ * xs_setup_udp - Set up transport to use a UDP socket
+ * @xprt: transport to set up
+ * @to: timeout parameters
+ *
+ */
+int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+{
+ size_t slot_table_size;
+
+ dprintk("RPC: setting up udp-ipv4 transport...\n");
+
+ xprt->max_reqs = xprt_udp_slot_table_entries;
+ slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
+ xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+ if (xprt->slot == NULL)
+ return -ENOMEM;
+ memset(xprt->slot, 0, slot_table_size);
+
+ xprt->prot = IPPROTO_UDP;
+ xprt->port = xprt_max_resvport;
+ xprt->tsh_size = 0;
+ xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
+ /* XXX: header size can vary due to auth type, IPv6, etc. */
+ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
+
+ INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+ xprt->bind_timeout = XS_BIND_TO;
+ xprt->connect_timeout = XS_UDP_CONN_TO;
+ xprt->reestablish_timeout = XS_UDP_REEST_TO;
+ xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+ xprt->ops = &xs_udp_ops;
+
+ if (to)
+ xprt->timeout = *to;
+ else
+ xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);
+
+ return 0;
+}
+
+/**
+ * xs_setup_tcp - Set up transport to use a TCP socket
+ * @xprt: transport to set up
+ * @to: timeout parameters
+ *
+ */
+int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+{
+ size_t slot_table_size;
+
+ dprintk("RPC: setting up tcp-ipv4 transport...\n");
+
+ xprt->max_reqs = xprt_tcp_slot_table_entries;
+ slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
+ xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
+ if (xprt->slot == NULL)
+ return -ENOMEM;
+ memset(xprt->slot, 0, slot_table_size);
+
+ xprt->prot = IPPROTO_TCP;
+ xprt->port = xprt_max_resvport;
+ xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
+ xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
+ xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+
+ INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+ xprt->bind_timeout = XS_BIND_TO;
+ xprt->connect_timeout = XS_TCP_CONN_TO;
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+ xprt->ops = &xs_tcp_ops;
+
+ if (to)
+ xprt->timeout = *to;
+ else
+ xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);
+
+ return 0;
+}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cbb0ba34a60..0db9e57013f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1192,46 +1192,6 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
EXPORT_SYMBOL(xfrm_bundle_ok);
-/* Well... that's _TASK_. We need to scan through transformation
- * list and figure out what mss tcp should generate in order to
- * final datagram fit to mtu. Mama mia... :-)
- *
- * Apparently, some easy way exists, but we used to choose the most
- * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
- *
- * Consider this function as something like dark humour. :-)
- */
-static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
-{
- int res = mtu - dst->header_len;
-
- for (;;) {
- struct dst_entry *d = dst;
- int m = res;
-
- do {
- struct xfrm_state *x = d->xfrm;
- if (x) {
- spin_lock_bh(&x->lock);
- if (x->km.state == XFRM_STATE_VALID &&
- x->type && x->type->get_max_size)
- m = x->type->get_max_size(d->xfrm, m);
- else
- m += x->props.header_len;
- spin_unlock_bh(&x->lock);
- }
- } while ((d = d->child) != NULL);
-
- if (m <= mtu)
- break;
- res -= (m - mtu);
- if (res < 88)
- return mtu;
- }
-
- return res + dst->header_len;
-}
-
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
int err = 0;
@@ -1252,8 +1212,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))
dst_ops->link_failure = xfrm_link_failure;
- if (likely(dst_ops->get_mss == NULL))
- dst_ops->get_mss = xfrm_get_mss;
if (likely(afinfo->garbage_collect == NULL))
afinfo->garbage_collect = __xfrm_garbage_collect;
xfrm_policy_afinfo[afinfo->family] = afinfo;
@@ -1281,7 +1239,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
dst_ops->check = NULL;
dst_ops->negative_advice = NULL;
dst_ops->link_failure = NULL;
- dst_ops->get_mss = NULL;
afinfo->garbage_collect = NULL;
}
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9d206c282cf..8b9a4747417 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1026,6 +1026,12 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
+/*
+ * This function is NOT optimal. For example, with ESP it will give an
+ * MTU that's usually two bytes short of being optimal. However, it will
+ * usually give an answer that's a multiple of 4 provided the input is
+ * also a multiple of 4.
+ */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
int res = mtu;
diff --git a/security/dummy.c b/security/dummy.c
index 9623a61dfc7..3d34f3de7e8 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -768,7 +768,7 @@ static int dummy_socket_getpeersec(struct socket *sock, char __user *optval,
return -ENOPROTOOPT;
}
-static inline int dummy_sk_alloc_security (struct sock *sk, int family, int priority)
+static inline int dummy_sk_alloc_security (struct sock *sk, int family, gfp_t priority)
{
return 0;
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index b13be15165f..447a1e0f48c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -262,7 +262,7 @@ static void superblock_free_security(struct super_block *sb)
}
#ifdef CONFIG_SECURITY_NETWORK
-static int sk_alloc_security(struct sock *sk, int family, int priority)
+static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
{
struct sk_security_struct *ssec;
@@ -3380,7 +3380,7 @@ out:
return err;
}
-static int selinux_sk_alloc_security(struct sock *sk, int family, int priority)
+static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
{
return sk_alloc_security(sk, family, priority);
}
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 8eb140dd2e4..a45cc971e73 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -879,7 +879,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
if (sscanf(page, "%d", &new_value) != 1)
goto out;
- if (new_value) {
+ if (new_value && bool_pending_values) {
security_set_bools(bool_num, bool_pending_values);
}
@@ -952,6 +952,7 @@ static int sel_make_bools(void)
/* remove any existing files */
kfree(bool_pending_values);
+ bool_pending_values = NULL;
sel_remove_bools(dir);
@@ -1002,6 +1003,7 @@ out:
}
return ret;
err:
+ kfree(values);
d_genocide(dir);
ret = -ENOMEM;
goto out;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 0a758323a9c..8e6262d12aa 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -650,8 +650,10 @@ void policydb_destroy(struct policydb *p)
}
if (lrt) kfree(lrt);
- for (i = 0; i < p->p_types.nprim; i++)
- ebitmap_destroy(&p->type_attr_map[i]);
+ if (p->type_attr_map) {
+ for (i = 0; i < p->p_types.nprim; i++)
+ ebitmap_destroy(&p->type_attr_map[i]);
+ }
kfree(p->type_attr_map);
return;
diff --git a/sound/core/init.c b/sound/core/init.c
index a5702014a70..c72a79115cc 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -828,7 +828,8 @@ static int snd_generic_suspend(struct device *dev, pm_message_t state, u32 level
card = get_snd_generic_card(dev);
if (card->power_state == SNDRV_CTL_POWER_D3hot)
return 0;
- card->pm_suspend(card, PMSG_SUSPEND);
+ if (card->pm_suspend)
+ card->pm_suspend(card, PMSG_SUSPEND);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
return 0;
}
@@ -843,7 +844,8 @@ static int snd_generic_resume(struct device *dev, u32 level)
card = get_snd_generic_card(dev);
if (card->power_state == SNDRV_CTL_POWER_D0)
return 0;
- card->pm_resume(card);
+ if (card->pm_resume)
+ card->pm_resume(card);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index e72cec77f0d..129abab5ce9 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -190,7 +190,7 @@ static void unmark_pages(struct page *page, int order)
*
* Returns a pointer to the buffer, or NULL if there is not enough memory.
*/
-void *snd_malloc_pages(size_t size, unsigned int gfp_flags)
+void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
int pg;
void *res;
@@ -235,7 +235,7 @@ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *d
{
int pg;
void *res;
- unsigned int gfp_flags;
+ gfp_t gfp_flags;
snd_assert(size > 0, return NULL);
snd_assert(dma != NULL, return NULL);
diff --git a/sound/core/seq/instr/ainstr_gf1.c b/sound/core/seq/instr/ainstr_gf1.c
index 207c2c54bf1..0e4df8826ee 100644
--- a/sound/core/seq/instr/ainstr_gf1.c
+++ b/sound/core/seq/instr/ainstr_gf1.c
@@ -51,7 +51,7 @@ static int snd_seq_gf1_copy_wave_from_stream(snd_gf1_ops_t *ops,
gf1_wave_t *wp, *prev;
gf1_xwave_t xp;
int err;
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
unsigned int real_size;
gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
@@ -144,7 +144,8 @@ static int snd_seq_gf1_put(void *private_data, snd_seq_kinstr_t *instr,
snd_gf1_ops_t *ops = (snd_gf1_ops_t *)private_data;
gf1_instrument_t *ip;
gf1_xinstrument_t ix;
- int err, gfp_mask;
+ int err;
+ gfp_t gfp_mask;
if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE)
return -EINVAL;
diff --git a/sound/core/seq/instr/ainstr_iw.c b/sound/core/seq/instr/ainstr_iw.c
index 67c24c8e8e7..7c19fbbc5d0 100644
--- a/sound/core/seq/instr/ainstr_iw.c
+++ b/sound/core/seq/instr/ainstr_iw.c
@@ -129,7 +129,7 @@ static int snd_seq_iwffff_copy_wave_from_stream(snd_iwffff_ops_t *ops,
iwffff_wave_t *wp, *prev;
iwffff_xwave_t xp;
int err;
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
unsigned int real_size;
gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL;
@@ -236,7 +236,7 @@ static int snd_seq_iwffff_put(void *private_data, snd_seq_kinstr_t *instr,
iwffff_layer_t *lp, *prev_lp;
iwffff_xlayer_t lx;
int err;
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE)
return -EINVAL;
diff --git a/sound/core/seq/instr/ainstr_simple.c b/sound/core/seq/instr/ainstr_simple.c
index 6183d215103..17ab94e7607 100644
--- a/sound/core/seq/instr/ainstr_simple.c
+++ b/sound/core/seq/instr/ainstr_simple.c
@@ -57,7 +57,8 @@ static int snd_seq_simple_put(void *private_data, snd_seq_kinstr_t *instr,
snd_simple_ops_t *ops = (snd_simple_ops_t *)private_data;
simple_instrument_t *ip;
simple_xinstrument_t ix;
- int err, gfp_mask;
+ int err;
+ gfp_t gfp_mask;
unsigned int real_size;
if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE)
diff --git a/sound/oss/dmasound/dmasound.h b/sound/oss/dmasound/dmasound.h
index 9a2f50f0b18..222014cafc1 100644
--- a/sound/oss/dmasound/dmasound.h
+++ b/sound/oss/dmasound/dmasound.h
@@ -116,7 +116,7 @@ typedef struct {
const char *name;
const char *name2;
struct module *owner;
- void *(*dma_alloc)(unsigned int, int);
+ void *(*dma_alloc)(unsigned int, gfp_t);
void (*dma_free)(void *, unsigned int);
int (*irqinit)(void);
#ifdef MODULE
diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c
index 8daaf87664b..59eb53f8931 100644
--- a/sound/oss/dmasound/dmasound_atari.c
+++ b/sound/oss/dmasound/dmasound_atari.c
@@ -114,7 +114,7 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
/*** Low level stuff *********************************************************/
-static void *AtaAlloc(unsigned int size, int flags);
+static void *AtaAlloc(unsigned int size, gfp_t flags);
static void AtaFree(void *, unsigned int size);
static int AtaIrqInit(void);
#ifdef MODULE
@@ -810,7 +810,7 @@ static TRANS transFalconExpanding = {
* Atari (TT/Falcon)
*/
-static void *AtaAlloc(unsigned int size, int flags)
+static void *AtaAlloc(unsigned int size, gfp_t flags)
{
return atari_stram_alloc(size, "dmasound");
}
diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c
index 2ceb46f1d40..b2bf8bac842 100644
--- a/sound/oss/dmasound/dmasound_awacs.c
+++ b/sound/oss/dmasound/dmasound_awacs.c
@@ -271,7 +271,7 @@ int expand_read_bal; /* Balance factor for expanding reads (not volume!) */
/*** Low level stuff *********************************************************/
-static void *PMacAlloc(unsigned int size, int flags);
+static void *PMacAlloc(unsigned int size, gfp_t flags);
static void PMacFree(void *ptr, unsigned int size);
static int PMacIrqInit(void);
#ifdef MODULE
@@ -614,7 +614,7 @@ tas_init_frame_rates(unsigned int *prop, unsigned int l)
/*
* PCI PowerMac, with AWACS, Screamer, Burgundy, DACA or Tumbler and DBDMA.
*/
-static void *PMacAlloc(unsigned int size, int flags)
+static void *PMacAlloc(unsigned int size, gfp_t flags)
{
return kmalloc(size, flags);
}
diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c
index 558db5311e0..d59f60b2641 100644
--- a/sound/oss/dmasound/dmasound_paula.c
+++ b/sound/oss/dmasound/dmasound_paula.c
@@ -69,7 +69,7 @@ static int write_sq_block_size_half, write_sq_block_size_quarter;
/*** Low level stuff *********************************************************/
-static void *AmiAlloc(unsigned int size, int flags);
+static void *AmiAlloc(unsigned int size, gfp_t flags);
static void AmiFree(void *obj, unsigned int size);
static int AmiIrqInit(void);
#ifdef MODULE
@@ -317,7 +317,7 @@ static inline void StopDMA(void)
enable_heartbeat();
}
-static void *AmiAlloc(unsigned int size, int flags)
+static void *AmiAlloc(unsigned int size, gfp_t flags)
{
return amiga_chip_alloc((long)size, "dmasound [Paula]");
}
diff --git a/sound/oss/dmasound/dmasound_q40.c b/sound/oss/dmasound/dmasound_q40.c
index 92c25a0174d..1ddaa6284b0 100644
--- a/sound/oss/dmasound/dmasound_q40.c
+++ b/sound/oss/dmasound/dmasound_q40.c
@@ -36,7 +36,7 @@ static int expand_data; /* Data for expanding */
/*** Low level stuff *********************************************************/
-static void *Q40Alloc(unsigned int size, int flags);
+static void *Q40Alloc(unsigned int size, gfp_t flags);
static void Q40Free(void *, unsigned int);
static int Q40IrqInit(void);
#ifdef MODULE
@@ -358,7 +358,7 @@ static TRANS transQ40Compressing = {
/*** Low level stuff *********************************************************/
-static void *Q40Alloc(unsigned int size, int flags)
+static void *Q40Alloc(unsigned int size, gfp_t flags)
{
return kmalloc(size, flags); /* change to vmalloc */
}
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index e0d0365453b..f1a2e2c2e02 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -163,7 +163,7 @@ static const uint8_t snd_usbmidi_cin_length[] = {
/*
* Submits the URB, with error handling.
*/
-static int snd_usbmidi_submit_urb(struct urb* urb, int flags)
+static int snd_usbmidi_submit_urb(struct urb* urb, gfp_t flags)
{
int err = usb_submit_urb(urb, flags);
if (err < 0 && err != -ENODEV)